diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml
index 0bdbaaa8e..98b2a668d 100644
--- a/.github/workflows/black.yml
+++ b/.github/workflows/black.yml
@@ -7,4 +7,4 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- - uses: psf/black@21.12b0
\ No newline at end of file
+ - uses: psf/black@stable
\ No newline at end of file
diff --git a/.github/workflows/build-containers.yml b/.github/workflows/build-containers.yml
deleted file mode 100644
index 861bee40c..000000000
--- a/.github/workflows/build-containers.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: Build and Push Containers
-
-on:
- release:
- types:
- - created
- workflow_dispatch:
-
-env:
- REGISTRY_USER: igibson
- IMAGE_REGISTRY: docker.io
- REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
-
-jobs:
- push_to_registry:
- name: Push images to docker.io
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - name: Log in to docker.io
- uses: redhat-actions/podman-login@v1
- with:
- username: ${{ env.REGISTRY_USER }}
- password: ${{ env.REGISTRY_PASSWORD }}
- registry: ${{ env.IMAGE_REGISTRY }}
-
- - name: install podman-compose
- run: pip3 install https://github.com/containers/podman-compose/archive/devel.tar.gz
-
- - name: Build and push images to quay
- run: |
- cd docker
- podman-compose -f compose-build.yml build
- podman-compose -f compose-build.yml push
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
deleted file mode 100644
index 12d4a1c8e..000000000
--- a/.github/workflows/docs.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: Build & deploy docs
-
-on: [push]
-
-jobs:
- build-docs:
- name: Build and deploy documentation
- runs-on: ubuntu-latest
- if: github.repository == 'StanfordVL/iGibson' && github.ref == 'refs/heads/master'
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v2
-
- - name: Setup python
- uses: actions/setup-python@v2
- with:
- python-version: "3.8"
- architecture: x64
-
- - name: Install dev requirements
- run: pip install -r requirements-dev.txt
-
- - name: Generate docs
- working-directory: ./docs
- run: make html
-
- - name: Deploy to gh-pages
- uses: peaceiris/actions-gh-pages@v3
- with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
- publish_dir: ./docs/_build/html
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
deleted file mode 100644
index 27812afcb..000000000
--- a/.github/workflows/release.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-name: Release on PyPI
-
-on:
- release:
- types:
- - created
-
-jobs:
- build:
- runs-on: "ubuntu-latest"
- steps:
- - uses: actions/checkout@master
- - name: Set up Python 3.8
- uses: actions/setup-python@v1
- with:
- python-version: 3.8
- - name: Install pypa/build
- run: >-
- python -m
- pip install
- build
- --user
- - name: Build a binary wheel and a source tarball
- run: >-
- python -m
- build
- --sdist
- --outdir dist/
- .
- - name: Publish a Python distribution to PyPI
- if: github.repository == 'StanfordVL/iGibson' && startsWith(github.ref, 'refs/tags')
- uses: pypa/gh-action-pypi-publish@release/v1
- with:
- user: __token__
- password: ${{ secrets.PYPI_API_TOKEN }}
diff --git a/.github/workflows/sync-repos.yml b/.github/workflows/sync-repos.yml
deleted file mode 100644
index de70e0b3c..000000000
--- a/.github/workflows/sync-repos.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: Sync iGibson-dev/master to iGibson/master
-
-on:
- workflow_dispatch:
- schedule:
- # Sync repositories on the 1st and 15th of the month at 9 AM
- - cron: "0 17 1,15 * *"
-
-jobs:
- sync-private-fork:
- runs-on: ubuntu-latest
- if: github.ref == 'refs/heads/master'
- steps:
- - uses: actions/checkout@v2
- - name: Sync iGibson-dev/master to iGibson/master
- if: github.repository == 'StanfordVL/iGibson-dev' && startsWith(github.ref, 'refs/tags')
- run:
- git push https://$PERSONAL_ACCESS_TOKEN:x-oauth-basic@github.com/StanfordVL/iGibson.git master:master
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
deleted file mode 100644
index 41d368318..000000000
--- a/.github/workflows/tests.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-name: Tests
-
-on: [pull_request]
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
- cancel-in-progress: true
-
-jobs:
- test:
- runs-on: [self-hosted, linux, gpu]
- if: github.repository == 'StanfordVL/iGibson-dev'
-
- steps:
- - name: Checkout source
- uses: actions/checkout@v2
- with:
- submodules: true
- path: igibson
-
- - name: Add CUDA to env
- run: echo "/usr/local/cuda/bin" >> $GITHUB_PATH
-
- - name: Setup python
- uses: actions/setup-python@v2
- with:
- python-version: "3.8"
- architecture: x64
-
- - name: Install dev requirements
- working-directory: igibson
- run: pip install -r requirements-dev.txt
-
- - name: Install
- working-directory: igibson
- run: pip install -e .
-
- - name: Checkout BDDL
- uses: actions/checkout@v2
- with:
- repository: StanfordVL/bddl-dev
- ref: 581be50e7cfd2b3a1447aaa1b4fc2424b673339c
- token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} # PAT is required since this is a different repo
- path: bddl
- submodules: recursive
- lfs: true
-
- - name: Install BDDL
- working-directory: bddl
- run: pip install -e .
-
- - name: Link Dataset
- working-directory: igibson
- run: ln -s /scr/ig-data igibson/data
-
- # The below method of checking out ig-dataset is currently unused due to poor speed.
- # - name: Create data directory
- # run: mkdir -p igibson/igibson/data
- #
- # - name: Checkout ig_dataset
- # uses: actions/checkout@v2
- # with:
- # repository: StanfordVL/ig_dataset
- # token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} # PAT is required since this is a different repo
- # path: igibson/igibson/data/ig_dataset
- # submodules: recursive
- # lfs: true
- #
- # - name: Checkout ig_assets
- # uses: actions/checkout@v2
- # with:
- # repository: StanfordVL/ig_assets
- # token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} # PAT is required since this is a different repo
- # path: igibson/igibson/data/assets
- # submodules: recursive
- # lfs: true
-
- - name: Run tests
- working-directory: igibson
- run: pytest
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v2.1.0
- with:
- token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 8efbad282..fe2169e29 100644
--- a/.gitignore
+++ b/.gitignore
@@ -81,6 +81,8 @@ igibson/utils/data_utils/mesh_decimation/final_videos
# libcryptopp
igibson/render/mesh_renderer/libcryptopp.so.8.6
-# Coverage
-.coverage
-coverage.xml
+# igibson run json files
+*my_igibson_run.json
+
+# Temp modifiable file.
+igibson/examples/configs/*yaml*
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c616f2c07..9cea96e6c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/psf/black
- rev: 21.12b0
+ rev: stable # Replace by any tag/version: https://github.com/psf/black/tags
hooks:
- id: black
language_version: python3 # Should be a command that runs python3.6+
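For reference, a minimal way to exercise the updated hook locally (assuming `pre-commit` is available in the active Python environment; these commands are illustrative and not part of the patch):

```bash
# Install the git hook defined in .pre-commit-config.yaml and run black over the whole tree.
pip install pre-commit
pre-commit install
pre-commit run black --all-files
```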
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 000000000..f57094a94
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,89 @@
+pipeline {
+
+ agent {
+ docker {
+ image 'gibsonchallenge/gibsonv2:jenkins2'
+ args '--runtime=nvidia -v ${WORKSPACE}/../ig_dataset:${WORKSPACE}/igibson/data/ig_dataset'
+ }
+ }
+
+ stages {
+ stage('Build') {
+ steps {
+ sh 'nvidia-smi'
+ sh 'pwd'
+ sh 'printenv'
+ sh 'pip install -e .'
+ sh 'sudo chown -R jenkins ${WORKSPACE}/igibson/data/'
+ }
+ }
+
+ stage('Build Docs') {
+ steps {
+ sh 'sphinx-apidoc -o docs/apidoc igibson igibson/external igibson/utils/data_utils/'
+ sh 'sphinx-build -b html docs _sites'
+ }
+ }
+
+ stage('Test') {
+ steps {
+            sh 'mkdir -p test_result'
+ sh 'pytest igibson/test/test_binding.py --junitxml=test_result/test_binding.py.xml'
+ sh 'pytest igibson/test/test_render.py --junitxml=test_result/test_render.py.xml'
+ sh 'pytest igibson/test/test_pbr.py --junitxml=test_result/test_pbr.py.xml'
+ sh 'pytest igibson/test/test_object.py --junitxml=test_result/test_object.py.xml'
+ sh 'pytest igibson/test/test_simulator.py --junitxml=test_result/test_simulator.py.xml'
+ sh 'pytest igibson/test/test_igibson_env.py --junitxml=test_result/test_igibson_env.py.xml'
+ sh 'pytest igibson/test/test_scene_importing.py --junitxml=test_result/test_scene_importing.py.xml'
+ sh 'pytest igibson/test/test_robot.py --junitxml=test_result/test_robot.py.xml'
+ sh 'pytest igibson/test/test_igsdf_scene_importing.py --junitxml=test_result/test_igsdf_scene_importing.py.xml'
+ sh 'pytest igibson/test/test_sensors.py --junitxml=test_result/test_sensors.py.xml'
+ sh 'pytest igibson/test/test_motion_planning.py --junitxml=test_result/test_motion_planning.py.xml'
+ sh 'pytest igibson/test/test_states.py --junitxml=test_result/test_states.py.xml'
+ sh 'pytest igibson/test/test_determinism_against_same_version.py --junitxml=test_result/test_determinism_against_same_version.py.xml'
+ }
+ }
+
+ stage('Benchmark') {
+ steps {
+ sh 'python igibson/test/benchmark/benchmark_static_scene.py'
+ sh 'python igibson/test/benchmark/benchmark_interactive_scene.py'
+ }
+ }
+
+ }
+ post {
+ always {
+ junit 'test_result/*.xml'
+ archiveArtifacts artifacts: 'test_result/*.xml', fingerprint: true
+ archiveArtifacts artifacts: '*.pdf'
+ archiveArtifacts artifacts: '*.png'
+
+ publishHTML (target: [
+ allowMissing: true,
+ alwaysLinkToLastBuild: false,
+ keepAll: true,
+ reportDir: '_sites',
+ reportFiles: 'index.html',
+ includes: '**/*',
+ reportName: "iGibson docs"
+ ])
+
+ cleanWs()
+ }
+ failure {
+ script {
+ // Send an email only if the build status has changed from green/unstable to red
+ emailext subject: '$DEFAULT_SUBJECT',
+ body: '$DEFAULT_CONTENT',
+ recipientProviders: [
+ [$class: 'CulpritsRecipientProvider'],
+ [$class: 'DevelopersRecipientProvider'],
+ [$class: 'RequesterRecipientProvider']
+ ],
+ replyTo: '$DEFAULT_REPLYTO',
+ to: '$DEFAULT_RECIPIENTS'
+ }
+ }
+ }
+}
diff --git a/README.md b/README.md
index cf99216f7..2a0fb6eec 100644
--- a/README.md
+++ b/README.md
@@ -27,24 +27,24 @@ iGibson is a simulation environment providing fast visual rendering and physics
If you use iGibson or its assets and models, consider citing the following publication:
```
-@misc{li2021igibson,
- title={iGibson 2.0: Object-Centric Simulation for Robot Learning of Everyday Household Tasks},
- author={Chengshu Li and Fei Xia and Roberto Mart\'in-Mart\'in and Michael Lingelbach and Sanjana Srivastava and Bokui Shen and Kent Vainio and Cem Gokmen and Gokul Dharan and Tanish Jain and Andrey Kurenkov and Karen Liu and Hyowon Gweon and Jiajun Wu and Li Fei-Fei and Silvio Savarese},
+@misc{shen2021igibson,
+ title={iGibson 1.0: a Simulation Environment for Interactive Tasks in Large Realistic Scenes},
+ author={Bokui Shen and Fei Xia and Chengshu Li and Roberto Martín-Martín and Linxi Fan and Guanzhi Wang and Claudia Pérez-D'Arpino and Shyamal Buch and Sanjana Srivastava and Lyne P. Tchapmi and Micael E. Tchapmi and Kent Vainio and Josiah Wong and Li Fei-Fei and Silvio Savarese},
year={2021},
- eprint={2108.03272},
+ eprint={2012.02924},
archivePrefix={arXiv},
- primaryClass={cs.RO}
+ primaryClass={cs.AI}
}
```
```
-@inproceedings{shen2021igibson,
- title={iGibson 1.0: a Simulation Environment for Interactive Tasks in Large Realistic Scenes},
- author={Bokui Shen and Fei Xia and Chengshu Li and Roberto Mart\'in-Mart\'in and Linxi Fan and Guanzhi Wang and Claudia P\'erez-D'Arpino and Shyamal Buch and Sanjana Srivastava and Lyne P. Tchapmi and Micael E. Tchapmi and Kent Vainio and Josiah Wong and Li Fei-Fei and Silvio Savarese},
- booktitle={2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
+@misc{li2021igibson,
+ title={iGibson 2.0: Object-Centric Simulation for Robot Learning of Everyday Household Tasks},
+ author={Chengshu Li and Fei Xia and Roberto Martín-Martín and Michael Lingelbach and Sanjana Srivastava and Bokui Shen and Kent Vainio and Cem Gokmen and Gokul Dharan and Tanish Jain and Andrey Kurenkov and Karen Liu and Hyowon Gweon and Jiajun Wu and Li Fei-Fei and Silvio Savarese},
year={2021},
- pages={accepted},
- organization={IEEE}
+ eprint={2108.03272},
+ archivePrefix={arXiv},
+ primaryClass={cs.RO}
}
```
@@ -57,9 +57,9 @@ If you want to know more about iGibson, you can also check out [our webpage](htt
For instructions to install iGibson and download dataset, you can visit [installation guide](http://svl.stanford.edu/igibson/docs/installation.html) and [dataset download guide](http://svl.stanford.edu/igibson/docs/dataset.html).
-There are other datasets we link to iGibson. We include support to use CubiCasa5K and 3DFront scenes, adding up more than 10000 extra interactive scenes to use in iGibson! Check our [documentation](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils/ext_scene) on how to use those.
+There are other datasets that can be used with iGibson. We include support for CubiCasa5K and 3DFront scenes, adding more than 8000 extra interactive scenes to use in iGibson! Check our documentation on how to use them.
-We also maintain compatibility with datasets of 3D reconstructed large real-world scenes (homes and offices) that you can download and use with iGibson. For Gibson Dataset and Stanford 2D-3D-Semantics Dataset, please fill out this [form](https://forms.gle/36TW9uVpjrE1Mkf9A). For Matterport3D Dataset, please fill in this [form](http://dovahkiin.stanford.edu/matterport/public/MP_TOS.pdf) and send it to [matterport3d@googlegroups.com](mailto:matterport3d@googlegroups.com). Please put "use with iGibson simulator" in your email. Check our [dataset download guide](http://svl.stanford.edu/igibson/docs/dataset.html) for more details.
+We also maintain compatibility with datasets of 3D reconstructed large real-world scenes (homes and offices) that you can download and use with iGibson, for example from our previous simulator, Gibson. All of them will be accessible once you fill in this [form].
### Using iGibson with VR
If you want to use iGibson VR interface, please visit the [VR guide (TBA)].
diff --git a/docker/.env b/docker/.env
deleted file mode 100644
index d98efa377..000000000
--- a/docker/.env
+++ /dev/null
@@ -1,3 +0,0 @@
-REGISTRY=docker.io
-REPO=igibson
-VERSION=v2.0.4
diff --git a/docker/tensorflow/Dockerfile b/docker/base/Dockerfile
similarity index 100%
rename from docker/tensorflow/Dockerfile
rename to docker/base/Dockerfile
diff --git a/docker/tensorflow/build.sh b/docker/base/build.sh
similarity index 100%
rename from docker/tensorflow/build.sh
rename to docker/base/build.sh
diff --git a/docker/tensorflow/run.sh b/docker/base/run.sh
similarity index 100%
rename from docker/tensorflow/run.sh
rename to docker/base/run.sh
diff --git a/docker/behavior/Dockerfile b/docker/behavior/Dockerfile
index 39668af25..33201887f 100644
--- a/docker/behavior/Dockerfile
+++ b/docker/behavior/Dockerfile
@@ -1,8 +1,38 @@
-ARG REGISTRY=docker.io
-ARG REPO=igibson
-ARG VERSION=v2.0.4
+FROM nvidia/cudagl:11.1.1-devel-ubuntu20.04
-FROM $REGISTRY/$REPO:$VERSION
+ARG DEBIAN_FRONTEND=noninteractive
-RUN git clone --depth 1 --branch master https://github.com/StanfordVL/bddl /opt/bddl --recursive
-RUN pip install --no-cache-dir -e /opt/bddl
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ cmake \
+ git \
+ vim \
+ wget \
+ curl \
+ python3-dev \
+ python3-opencv \
+ python3-pip && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh
+RUN bash Miniconda-latest-Linux-x86_64.sh -p /miniconda -b
+RUN rm Miniconda-latest-Linux-x86_64.sh
+ENV PATH=/miniconda/bin:${PATH}
+RUN conda update -y conda
+RUN conda create -y -n igibson python=3.8.0
+
+ENV PATH /miniconda/envs/igibson/bin:$PATH
+
+RUN pip install torch pytest ray[default,rllib] stable-baselines3
+
+RUN git clone --branch ig-develop https://github.com/StanfordVL/iGibson /opt/iGibson --recursive
+WORKDIR /opt/iGibson
+RUN pip install -e .
+
+RUN git clone --branch master https://github.com/StanfordVL/BDDL /opt/BDDL --recursive
+WORKDIR /opt/BDDL
+RUN pip install -e .
+
+RUN python3 -m igibson.utils.assets_utils --download_assets
+RUN python3 -m igibson.utils.assets_utils --download_demo_data
+
+WORKDIR /opt/iGibson/igibson
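A rough sketch of how the rewritten behavior image might be built and started; the image tag and run flags below are illustrative assumptions, not defined by this Dockerfile:

```bash
# Build the behavior image from the repository root and open a shell with GPU access.
docker build -t igibson/behavior:local docker/behavior
docker run --gpus all -ti --rm igibson/behavior:local bash
```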
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
deleted file mode 100644
index 512092dcf..000000000
--- a/docker/docker-compose.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-version: "3.7"
-
-services:
- igibson:
- image: ${REGISTRY}/${REPO}:${VERSION}
- build:
- context: ./igibson
- dockerfile: Dockerfile
-
- igibson-tensorflow:
- image: ${REGISTRY}/${REPO}:${VERSION}-tensorflow
- build:
- context: ./tensorflow
- dockerfile: Dockerfile
-
- behavior:
- image: ${REGISTRY}/${REPO}:${VERSION}-behavior
- build:
- context: ./behavior
- dockerfile: Dockerfile
- args:
- - VERSION=${VERSION}
- - REGISTRY=${REGISTRY}
- - REPO=${REPO}
-
diff --git a/docker/headless-gui/Dockerfile b/docker/headless-gui/Dockerfile
new file mode 100644
index 000000000..9e894a7d0
--- /dev/null
+++ b/docker/headless-gui/Dockerfile
@@ -0,0 +1,25 @@
+FROM igibson/igibson:latest
+
+# add dummy display and remote GUI via x11VNC
+
+RUN DEBIAN_FRONTEND=noninteractive apt install -y \
+ xserver-xorg-video-dummy \
+ xfce4 desktop-base \
+ x11vnc net-tools
+# disable screensaver
+RUN apt autoremove -y xscreensaver
+
+# optional: if you want a richer desktop experience
+RUN DEBIAN_FRONTEND=noninteractive apt install -y \
+ xfce4-terminal firefox
+RUN echo 2 | update-alternatives --config x-terminal-emulator
+# ==== end of optional =====
+
+RUN mkdir -p /opt/misc /opt/logs
+COPY x-dummy.conf /opt/misc
+COPY entrypoint.sh /opt/misc
+
+ENV QT_X11_NO_MITSHM=1
+ENV DISPLAY=:0
+WORKDIR /opt/igibson/igibson/examples/demo
+ENTRYPOINT ["/opt/misc/entrypoint.sh"]
diff --git a/docker/headless-gui/build.sh b/docker/headless-gui/build.sh
new file mode 100755
index 000000000..cf79d7589
--- /dev/null
+++ b/docker/headless-gui/build.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+IMAGE=igibson/igibson-gui
+
+export DOCKER_BUILDKIT=1
+docker build -t $IMAGE:v1.0.1 . \
+ && docker tag $IMAGE:v1.0.1 $IMAGE:latest \
+ && echo BUILD SUCCESSFUL
diff --git a/docker/headless-gui/entrypoint.sh b/docker/headless-gui/entrypoint.sh
new file mode 100755
index 000000000..a8ef16a4e
--- /dev/null
+++ b/docker/headless-gui/entrypoint.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+VNC_PASSWORD=${VNC_PASSWORD:-112358}
+
+# start X server with dummy display on :0
+X -config /opt/misc/x-dummy.conf > /opt/logs/x-dummy.log 2>&1 &
+
+sleep 2
+
+# start xfce desktop
+startxfce4 > /opt/logs/xcfe4.log 2>&1 &
+
+# start x11VNC server. Must publish port 5900 at `docker run`
+x11vnc -display :0 -noxrecord -noxfixes -noxdamage -forever -passwd $VNC_PASSWORD > /opt/logs/x11vnc.log 2>&1 &
+
+"$@"
diff --git a/docker/headless-gui/run.sh b/docker/headless-gui/run.sh
new file mode 100755
index 000000000..7957845bc
--- /dev/null
+++ b/docker/headless-gui/run.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+VNC_PORT=5900
+VNC_PASSWORD=112358
+
+PARAMS=
+while (( "$#" )); do
+ case "$1" in
+ -p|--vnc-port)
+ VNC_PORT=$2
+ shift 2
+ ;;
+ -pw|--vnc-password)
+ VNC_PASSWORD=$2
+ shift 2
+ ;;
+ --) # end argument parsing
+ shift
+ break
+ ;;
+ -*|--*=) # unsupported flags
+ echo "Error: Unsupported flag $1" >&2
+ exit 1
+ ;;
+ *) # preserve positional arguments
+ PARAMS="$PARAMS $1"
+ shift
+ ;;
+ esac
+done
+
+echo Starting VNC server on port $VNC_PORT with password $VNC_PASSWORD
+echo please run \"python simulator_example.py\" once you see the docker command prompt:
+docker run --gpus all -ti -p $VNC_PORT:5900 -e VNC_PASSWORD=$VNC_PASSWORD --rm igibson/igibson-gui:latest bash
\ No newline at end of file
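A hypothetical invocation of the script above, followed by the matching client connection (the viewer command is an assumption; any VNC client pointed at the published port works):

```bash
# Start the container with a custom VNC port and password, then run the suggested demo inside it.
./run.sh --vnc-port 5901 --vnc-password mysecret
# From another terminal on the host, attach a VNC viewer to the published port.
vncviewer localhost:5901
```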
diff --git a/docker/headless-gui/x-dummy.conf b/docker/headless-gui/x-dummy.conf
new file mode 100644
index 000000000..de4648bef
--- /dev/null
+++ b/docker/headless-gui/x-dummy.conf
@@ -0,0 +1,25 @@
+Section "Monitor"
+ Identifier "Monitor0"
+ HorizSync 28.0-80.0
+ VertRefresh 48.0-75.0
+ # https://arachnoid.com/modelines/
+ # 1920x1080 @ 60.00 Hz (GTF) hsync: 67.08 kHz; pclk: 172.80 MHz
+ Modeline "1920x1080_60.00" 172.80 1920 2040 2248 2576 1080 1081 1084 1118 -HSync +Vsync
+EndSection
+
+Section "Device"
+ Identifier "Card0"
+ Driver "dummy"
+ VideoRam 256000
+EndSection
+
+Section "Screen"
+ DefaultDepth 24
+ Identifier "Screen0"
+ Device "Card0"
+ Monitor "Monitor0"
+ SubSection "Display"
+ Depth 24
+ Modes "1920x1080_60.00"
+ EndSubSection
+EndSection
\ No newline at end of file
diff --git a/docker/igibson/Dockerfile b/docker/igibson/Dockerfile
deleted file mode 100644
index cbf6e52b7..000000000
--- a/docker/igibson/Dockerfile
+++ /dev/null
@@ -1,41 +0,0 @@
-FROM nvidia/cudagl:11.3.1-base-ubuntu20.04
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-RUN apt-get update && apt-get install -y --no-install-recommends \
- cmake \
- curl \
- g++ \
- git \
- make \
- vim \
- wget \
- cuda-command-line-tools-11-3 && \
- rm -rf /var/lib/apt/lists/*
-
-RUN curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh
-RUN bash Miniconda-latest-Linux-x86_64.sh -p /miniconda -b
-RUN rm Miniconda-latest-Linux-x86_64.sh
-ENV PATH=/miniconda/bin:${PATH}
-
-RUN conda update -y conda
-RUN conda create -y -n igibson python=3.8.0
-
-ENV PATH /miniconda/envs/igibson/bin:$PATH
-
-# NOTE: This needs to be updated in-step with the base cudagl image so the tensor renderer works
-RUN pip install torch==1.10.0+cu113 torchvision==0.11.1+cu113 torchaudio==0.10.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html --no-cache-dir
-
-RUN git clone --depth 1 https://github.com/StanfordVL/iGibson /opt/iGibson --recursive && \
- rm -rf /opt/iGibson/igibson/render/openvr/samples
-
-RUN pip install --no-cache-dir -e .
-
-RUN pip install --no-cache-dir pytest ray[default,rllib] stable-baselines3 && rm -rf /root/.cache
-
-WORKDIR /opt/iGibson
-
-RUN python3 -m igibson.utils.assets_utils --download_assets && rm -rf /tmp/*
-RUN python3 -m igibson.utils.assets_utils --download_demo_data && rm -rf /tmp/*
-
-WORKDIR /opt/igibson/igibson/examples/demo
diff --git a/docker/igibson2/Dockerfile b/docker/igibson2/Dockerfile
new file mode 100644
index 000000000..82b66c33c
--- /dev/null
+++ b/docker/igibson2/Dockerfile
@@ -0,0 +1,34 @@
+FROM nvidia/cudagl:11.1.1-devel-ubuntu20.04
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ cmake \
+ git \
+ wget \
+ curl \
+ python3-dev \
+ python3-opencv \
+ python3-pip && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh
+RUN bash Miniconda-latest-Linux-x86_64.sh -p /miniconda -b
+RUN rm Miniconda-latest-Linux-x86_64.sh
+ENV PATH=/miniconda/bin:${PATH}
+RUN conda update -y conda
+RUN conda create -y -n igibson python=3.8.0
+
+ENV PATH /miniconda/envs/igibson/bin:$PATH
+
+RUN pip install --no-cache-dir https://github.com/StanfordVL/bullet3/archive/master.zip
+RUN pip install torch pytest ray[default,rllib] stable-baselines3
+
+RUN git clone --branch master https://github.com/StanfordVL/iGibson /opt/igibson --recursive
+WORKDIR /opt/igibson
+RUN pip install -e .
+
+RUN python3 -m igibson.utils.assets_utils --download_assets
+RUN python3 -m igibson.utils.assets_utils --download_demo_data
+
+WORKDIR /opt/igibson/igibson/examples/demo
diff --git a/docker/igibson/build.sh b/docker/igibson2/build.sh
similarity index 100%
rename from docker/igibson/build.sh
rename to docker/igibson2/build.sh
diff --git a/docker/igibson/run.sh b/docker/igibson2/run.sh
similarity index 100%
rename from docker/igibson/run.sh
rename to docker/igibson2/run.sh
diff --git a/docker/jenkins/Dockerfile b/docker/jenkins/Dockerfile
new file mode 100644
index 000000000..d63f5e1de
--- /dev/null
+++ b/docker/jenkins/Dockerfile
@@ -0,0 +1,54 @@
+FROM nvidia/cudagl:10.0-base-ubuntu18.04
+
+ARG CUDA=10.0
+ARG CUDNN=7.6.2.24-1
+
+RUN apt-get update && \
+ apt-get -y install sudo
+
+ARG USER_ID
+ARG GROUP_ID
+
+RUN addgroup --gid $GROUP_ID jenkins
+RUN adduser --disabled-password --gecos '' --uid $USER_ID --gid $GROUP_ID jenkins
+
+ENV user jenkins
+
+RUN chown -R ${user} /home/${user} && \
+ adduser ${user} sudo && \
+ echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+
+USER ${user}
+WORKDIR /home/${user}
+
+RUN sudo apt-get install -y --no-install-recommends \
+ curl build-essential git cmake \
+ cuda-command-line-tools-10-0 \
+ cuda-cublas-10-0 \
+ cuda-cufft-10-0 \
+ cuda-curand-10-0 \
+ cuda-cusolver-10-0 \
+ cuda-cusparse-10-0 \
+ libcudnn7=${CUDNN}+cuda${CUDA} \
+ vim \
+ tmux \
+ libhdf5-dev \
+ libsm6 \
+ libxext6 \
+ libxrender-dev \
+ wget
+
+# Install miniconda to /miniconda
+RUN curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh
+RUN bash Miniconda-latest-Linux-x86_64.sh -p /home/${user}/miniconda -b
+RUN rm Miniconda-latest-Linux-x86_64.sh
+ENV PATH=/home/${user}/miniconda/bin:${PATH}
+RUN conda update -y conda
+RUN conda create -y -n py3-igibson python=3.6.8
+# Python packages from conda
+
+ENV PATH /home/${user}/miniconda/envs/py3-igibson/bin:$PATH
+
+RUN pip install pytest
+RUN pip install Cython==0.21.1
\ No newline at end of file
diff --git a/docker/jenkins/build.sh b/docker/jenkins/build.sh
new file mode 100644
index 000000000..03207a8a6
--- /dev/null
+++ b/docker/jenkins/build.sh
@@ -0,0 +1 @@
+docker build -t gibsonchallenge/gibsonv2:jenkins2 --build-arg USER_ID=$(id -u jenkins) --build-arg GROUP_ID=$(id -g jenkins) .
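For context, this is the image the Jenkinsfile above selects as its agent. A rough manual equivalent of that agent configuration (the bind mount and runtime flag are taken from the Jenkinsfile args; the working directory is an assumption) might look like:

```bash
# Approximate the Jenkins docker agent by hand: NVIDIA runtime plus the ig_dataset bind mount.
docker run --runtime=nvidia -ti --rm \
    -v "$PWD/../ig_dataset:$PWD/igibson/data/ig_dataset" \
    gibsonchallenge/gibsonv2:jenkins2 bash
```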
diff --git a/docker/pull-images.sh b/docker/pull-images.sh
new file mode 100755
index 000000000..d6fc273d0
--- /dev/null
+++ b/docker/pull-images.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+VERSION=1.0.1
+
+docker pull igibson/igibson:v$VERSION
+docker pull igibson/igibson:latest
+docker pull igibson/igibson-gui:v$VERSION
+docker pull igibson/igibson-gui:latest
diff --git a/docker/push-images.sh b/docker/push-images.sh
new file mode 100755
index 000000000..f7dfda01f
--- /dev/null
+++ b/docker/push-images.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+VERSION=1.0.1
+
+docker push igibson/igibson:v$VERSION
+docker push igibson/igibson:latest
+docker push igibson/igibson-gui:v$VERSION
+docker push igibson/igibson-gui:latest
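Note that the push script assumes an already-authenticated Docker Hub session; a minimal sketch (illustrative only):

```bash
# Authenticate against docker.io before running push-images.sh.
docker login docker.io
./push-images.sh
```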
diff --git a/docs/assets.md b/docs/assets.md
index abe3cba49..573127d3f 100644
--- a/docs/assets.md
+++ b/docs/assets.md
@@ -69,3 +69,7 @@ We also include [YCB objects](http://www.ycbbenchmarks.com/object-models/) in `y
## Test meshes
In `test` folder we save a mesh file to test the renderer is compiled correctly, it is used by `test/test_render.py`.
+
+## Example configs
+
+Sample config files to be used with the demo.
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index ed8f39c42..7c1bbc072 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -23,7 +23,6 @@
import sphinx_rtd_theme
from recommonmark.parser import CommonMarkParser
-sys.path.append(os.path.abspath("../"))
import igibson
project = "iGibson"
@@ -51,7 +50,6 @@
"sphinx_markdown_tables",
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
- "myst_parser",
]
html_theme = "sphinx_rtd_theme"
diff --git a/docs/dataset.md b/docs/dataset.md
index b8a9c30df..50cc050d8 100644
--- a/docs/dataset.md
+++ b/docs/dataset.md
@@ -1,35 +1,39 @@
-# Datasets
+Dataset
+==========================================
-In this page you will find information about:
+This page includes two parts. First, we introduce the new iGibson dataset in this release. Second, we explain
+ how to download the previous Gibson dataset, which has been updated to be compatible with iGibson.
-- [How to download iGibson 2.0 scenes and the BEHAVIOR Dataset of Objects](#download-igibson-2-0-scenes-and-the-behavior-dataset-of-objects)
-- [How to download iGibson 1.0 scenes](#download-igibson-1-0-scenes)
-- [How to download Gibson and Stanford 2D-3D-Semantics scenes](#download-gibson-and-stanford-2d-3d-semantics-scenes)
-- [How to download Matterport3D scenes](#download-matterport3d-scenes)
+- [Download iGibson 2.0 Scenes and Behavior Dataset of Objects](#download-igibson-2.0-scenes-and-behavior-dataset-of-objects)
+- [Download iGibson 1.0 Scenes](#download-igibson-1.0-scenes)
+- [Download Gibson Scenes](#download-gibson-scenes)
-## Download iGibson 2.0 Scenes and the BEHAVIOR Dataset of Objects
+Download iGibson 2.0 Scenes and Behavior Dataset of Objects
+-------------------------
-What will you download?
-- **iGibson 2.0 Dataset of Scenes**: New versions of the fully interactive scenes, more densely populated with objects.
-- **BEHAVIOR Object Dataset**: Dataset of object models annotated with physical and semantic properties. The 3D models are free to use within iGibson 2.0 for BEHAVIOR (due to artists' copyright, models are encrypted and allowed only to be used with iGibson 2.0). You can download a bundle of the iGibson 2.0 dataset of scenes and the BEHAVIOR dataset of objects here.
+- iGibson 2.0 Dataset of Scenes: New versions of the fully interactive scenes, more densely populated with objects.
+- BEHAVIOR Object Dataset: Dataset of object models annotated with physical and semantic properties. The 3D models are free to use within iGibson 2.0 for BEHAVIOR (due to artists' copyright, models are encrypted and allowed only to be used with iGibson 2.0). You can download a bundle of the iGibson 2.0 dataset of scenes and the BEHAVIOR dataset of objects here.
To download both in a bundle, you need to follow the following steps:
-- Fill out the license agreement in this [form](https://docs.google.com/forms/d/e/1FAIpQLScPwhlUcHu_mwBqq5kQzT2VRIRwg_rJvF0IWYBk_LxEZiJIFg/viewform)
-- You will receive a key (igibson.key). Move it into the subfolder of the iGibson repository that contains the dataset, for example, iGibson/igibson/data
-- Download the behavior data bundle (ig_dataset) from [here](https://storage.googleapis.com/gibson_scenes/behavior_data_bundle.zip)
-- Unzip ig_dataset into the folder: `unzip behavior_data_bundle.zip -d iGibson/igibson/data`
+Request access to the BEHAVIOR assets using this [form](https://docs.google.com/forms/d/e/1FAIpQLScPwhlUcHu_mwBqq5kQzT2VRIRwg_rJvF0IWYBk_LxEZiJIFg/viewform):
+- Fill out the license agreement as suggested in the [form](https://docs.google.com/forms/d/e/1FAIpQLScPwhlUcHu_mwBqq5kQzT2VRIRwg_rJvF0IWYBk_LxEZiJIFg/viewform)
+- When done, copy the key you receive (igibson.key) into the repository subfolder iGibson/igibson/data
+- Download the behavior data bundle (ig_dataset) [here](https://storage.googleapis.com/gibson_scenes/behavior_data_bundle.zip).
+- Unzip ig_dataset:
+ - `mkdir iGibson/igibson/data`
+ - `unzip behavior_data_bundle.zip -d iGibson/igibson/data`
-After this process, you will be able to sample and use the scenes and objects in iGibson, for example, to evaluate your embodied AI solutions in the [BEHAVIOR benchmark](https://behavior.stanford.edu/).
-## Download iGibson 1.0 Scenes
+Download iGibson 1.0 Scenes
+------------------------
-What will you download?
-- **iGibson 1.0 Dataset of Scenes**: We annotated fifteen 3D reconstructions of real-world scans and converted them into fully interactive scene models. In this process, we respect the original object-instance layout and object-category distribution. The object models are extended from open-source datasets ([ShapeNet Dataset](https://www.shapenet.org/), [Motion Dataset](http://motiondataset.zbuaa.com/), [SAPIEN Dataset](https://sapien.ucsd.edu/)) enriched with annotations of material and dynamic properties.
+We annotate fifteen 3D reconstructions of real-world scans and convert them into fully interactive scene models. In this process, we respect the original object-instance layout and object-category distribution. The object models are extended from open-source datasets ([ShapeNet Dataset](https://www.shapenet.org/), [Motion Dataset](http://motiondataset.zbuaa.com/), [SAPIEN Dataset](https://sapien.ucsd.edu/)) enriched with annotations of material and dynamic properties.
-The following image shows the fifteen fully interactive scenes:
+The fifteen fully interactive models are visualized below.
![placeholder.jpg](images/ig_scene.png)
+#### Download Instructions
To download the dataset, you need to first configure where the dataset is to be stored. You can change it in `your_installation_path/igibson/global_config.yaml` (default and recommended: `ig_dataset: your_installation_path/igibson/data/ig_dataset`). iGibson scenes can be downloaded with one single line:
```bash
@@ -37,38 +41,54 @@ python -m igibson.utils.assets_utils --download_ig_dataset
```
If the script fails to work, you can download from this [direct link](https://storage.googleapis.com/gibson_scenes/ig_dataset.tar.gz) and extract to `your_installation_path/igibson/data/ig_dataset`.
+#### Dataset Format
+The new dataset format can be found [here](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils).
-A description of the file structure and format of the files in the dataset can be found [here](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils).
+#### Cubicasa / 3D Front Dataset
+We provide support for the Cubicasa and 3D Front datasets. To import them into iGibson, follow the guide [here](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils/ext_scene).
-**Cubicasa / 3D Front Dataset Support:** We provide support for Cubicasa and 3D Front Dataset providing more than 10000 additional scenes (with less furniture than our fifteen scenes). To import them into iGibson, follow the instructions [here](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils/ext_scene).
+Download Gibson Scenes
+------------------------
+The original Gibson Environment Dataset has been updated to be used with the iGibson simulator. The link will first take you to
+ the license agreement and then to the data.
-## Download Gibson and Stanford 2D-3D-Semantics scenes
+[[ Get download link for Gibson Data ]].
-What will you download?
-- **Gibson static scenes**: more than 500 reconstructions of homes and offices with a Matterport device. These models keep the texture observed with the sensor, but contain some irregularities, specially with reflective surfaces and thin elements like chairs' legs.
-- **Stanford 2D-3D-Semantics scenes**: 7 reconstructions of Stanford offices.
+License Note: The dataset license is included in the above link. The license in this repository covers only the provided software.
-Files included in the dataset:
+Files included in this distribution:
-- All scenes, 572 scenes (108GB): gibson_v2_all.tar.gz
-- 4+ partition, 106 scenes, with textures better packed (2.6GB): gibson_v2_4+.tar.gz
-- Stanford 2D-3D-Semantics, 7 scenes (1.4GB): 2d3ds_for_igibson.zip
+1. All scenes, 572 scenes (108GB): gibson_v2_all.tar.gz
+2. 4+ partition, 106 scenes, with textures better packed (2.6GB): gibson_v2_4+.tar.gz
+3. Demo scene `Rs`
-We have updated these datasets to be used with iGibson so that users can keep developing and studying pure navigation solutions. The following link will bring you to a license agreement and then to a downloading URL: [form](https://forms.gle/36TW9uVpjrE1Mkf9A)
-
-After filling in the agreement, you will obtain a downloading `URL`.
-You can download the data manually and store it in the path set in `your_installation_path/igibson/global_config.yaml` (default and recommended: `g_dataset: your_installation_path/igibson/data/g_dataset`).
-Alternatively, you can run a single command to download the dataset, decompress, and place it in the correct folder:
+To download 1 and 2, you need to fill in the agreement to obtain the download link `URL`, after which you can
+ manually download and store them in the path set in `your_installation_path/igibson/global_config.yaml` (default and
+ recommended: `dataset: your_installation_path/igibson/data/g_dataset`). Alternatively, you can run a single command
+ that automatically downloads, decompresses, and puts the dataset in the correct place:
```bash
python -m igibson.utils.assets_utils --download_dataset URL
```
-The Gibson Environment Dataset consists of 572 models and 1440 floors. We cover a diverse set of models including households, offices, hotels, venues, museums, hospitals, construction sites, etc. A diverse set of visualization of all spaces in Gibson can be seen [here](http://gibsonenv.stanford.edu/database/).
-The following image shows some of the environments:
+To download 3, you can run:
+
+```bash
+python -m igibson.utils.assets_utils --download_demo_data
+```
+
+
+### Original Gibson Environment Dataset Description (Non-interactive)
+
+
+The full Gibson Environment Dataset consists of 572 models and 1440 floors. It covers a diverse set of models including households, offices, hotels, venues, museums, hospitals, construction sites, etc. Visualizations of all spaces in Gibson can be seen [here](http://gibsonenv.stanford.edu/database/).
+
![spaces.png](images/spaces.png)
-**Gibson Dataset Metadata:** Each space in the database has some metadata with the following attributes associated with it. The metadata is available in this [JSON file](https://raw.githubusercontent.com/StanfordVL/GibsonEnv/master/gibson/data/data.json).
+
+#### Dataset Metadata
+
+Each space in the database has some metadata with the following attributes associated with it. The metadata is available in this [JSON file](https://raw.githubusercontent.com/StanfordVL/GibsonEnv/master/gibson/data/data.json).
```
id # the name of the space, e.g. ""Albertville""
area # total metric area of the building, e.g. "266.125" sq. meters
@@ -81,13 +101,10 @@ split_full+ # if the space is in train/val/test/none split of Full+
split_medium # if the space is in train/val/test/none split of Medium partition
split_tiny # if the space is in train/val/test/none split of Tiny partition
```
-- Floor Number: Total number of floors in each model. We calculate floor numbers using distinctive camera locations. We use `sklearn.cluster.DBSCAN` to cluster these locations by height and set minimum cluster size to `5`. This means areas with at least `5` sweeps are treated as one single floor. This helps us capture small building spaces such as backyard, attics, basements.
-- Area: Total floor area of each model. We calculate total floor area by summing up area of each floor. This is done by sampling point cloud locations based on floor height, and fitting a `scipy.spatial.ConvexHull` on sample locations.
-- SSA: Specific surface area. The ratio of inner mesh surface and volume of convex hull of the mesh. This is a measure of clutter in the models: if the inner space is placed with large number of furnitures, objects, etc, the model will have high SSA.
-- Navigation Complexity: The highest complexity of navigating between arbitrary points within the model. We sample arbitrary point pairs inside the model, and calculate `A∗` navigation distance between them. `Navigation Complexity` is equal to `A*` distance divide by `straight line distance` between the two points. We compute the highest navigation complexity for every model. Note that all point pairs are sample within the *same floor*.
-- Subjective Attributes: We examine each model manually, and note the subjective attributes of them. This includes their furnishing style, house shapes, whether they have long stairs, etc.
-**Gibson Dataset Format:** Each space in the database has its own folder. All the modalities and metadata for each space are contained in that folder.
+#### Dataset Format
+
+Each space in the database has its own folder. All the modalities and metadata for each space are contained in that folder.
```
mesh_z_up.obj # 3d mesh of the environment, it is also associated with an mtl file and a texture file, omitted here
floors.txt # floor height
@@ -98,13 +115,26 @@ floor_trav_{}.png # top down views of traversable areas for each floor
For the maps, each pixel represents 0.01m, and the center of the image correspond to `(0,0)` in the mesh, as well as in the pybullet coordinate system.
+#### Dataset Metrics
+
+
+**Floor Number** Total number of floors in each model.
+
+We calculate floor numbers using distinctive camera locations. We use `sklearn.cluster.DBSCAN` to cluster these locations by height and set the minimum cluster size to `5`. This means areas with at least `5` sweeps are treated as one single floor. This helps us capture small building spaces such as backyards, attics, and basements.
+
+**Area** Total floor area of each model.
+
+We calculate total floor area by summing up area of each floor. This is done by sampling point cloud locations based on floor height, and fitting a `scipy.spatial.ConvexHull` on sample locations.
+
+**SSA** Specific surface area.
+
+The ratio of the inner mesh surface to the volume of the convex hull of the mesh. This is a measure of clutter in the models: if the inner space is filled with a large number of furniture pieces, objects, etc., the model will have a high SSA.
+
+**Navigation Complexity** The highest complexity of navigating between arbitrary points within the model.
-## Download Matterport3D Scenes
-What will you download?
-- Matterport3D Dataset: 90 scenes (3.2GB)
+We sample arbitrary point pairs inside the model, and calculate `A∗` navigation distance between them. `Navigation Complexity` is equal to `A*` distance divided by `straight line distance` between the two points. We compute the highest navigation complexity for every model. Note that all point pairs are sampled within the *same floor*.
-Please fill in this [form](http://dovahkiin.stanford.edu/matterport/public/MP_TOS.pdf) and send it to [matterport3d@googlegroups.com](mailto:matterport3d@googlegroups.com). Please put "use with iGibson simulator" in your email.
+**Subjective Attributes**
-You'll then recieve a python script via email in response. Run `python download_mp.py --task_data igibson -o .` with the received script to download the data (3.2GB). Afterwards, move each of the scenes to the path set in `your_installation_path/igibson/global_config.yaml` (default and recommended: `g_dataset: your_installation_path/igibson/data/g_dataset`).
+We examine each model manually and note its subjective attributes. These include furnishing style, house shape, whether it has long stairs, etc.
-Reference: [Matterport3D webpage](https://niessner.github.io/Matterport/).
\ No newline at end of file
diff --git a/docs/environments.md b/docs/environments.md
index bdf6c9d6a..d9bfd6f01 100644
--- a/docs/environments.md
+++ b/docs/environments.md
@@ -52,9 +52,9 @@ We provide a few common termination conditions for robotics tasks.
Most of the code can be found in [igibson/termination_conditions](https://github.com/StanfordVL/iGibson/tree/master/igibson/termination_conditions).
#### Configs
-To instantiate an **Environment**, we first need to create a YAML config file. It will specify parameters for the **Environment** (e.g. robot type, action frequency, etc), the **Sensors** (e.g. sensor types, image resolution, noise rate, etc), the **Task** (e.g. task type, goal distance range, etc), the **Reward Functions** (e.g. reward types, reward scale, etc) and the **Termination Conditions** (e.g. goal convergence threshold, time limit, etc). Exapmles of config files can be found here: [configs](https://github.com/StanfordVL/iGibson/tree/master/igibson/configs).
+To instantiate an **Environment**, we first need to create a YAML config file. It will specify parameters for the **Environment** (e.g. robot type, action frequency, etc), the **Sensors** (e.g. sensor types, image resolution, noise rate, etc), the **Task** (e.g. task type, goal distance range, etc), the **Reward Functions** (e.g. reward types, reward scale, etc) and the **Termination Conditions** (e.g. goal convergence threshold, time limit, etc). Examples of config files can be found here: [examples/configs](https://github.com/StanfordVL/iGibson/tree/master/examples/configs).
-Here is one example: [configs/turtlebot_nav.yaml](https://github.com/StanfordVL/iGibson/blob/master/igibson/configs/turtlebot_nav.yaml)
+Here is one example: [examples/configs/turtlebot_point_nav.yaml](https://github.com/StanfordVL/iGibson/blob/master/examples/configs/turtlebot_point_nav.yaml)
```yaml
# scene
@@ -130,7 +130,8 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
```
@@ -182,8 +183,8 @@ Parameters of this config file are explained below:
| laser_link_name | scan_link | the link name of the LiDAR sensor in the robot URDF file |
| depth_noise_rate | 0.0 | noise rate for the depth camera. 0.1 means 10% of the pixels will be corrupted (set to 0.0) |
| scan_noise_rate | 0.0 | noise rate for the LiDAR. 0.1 means 10% of the rays will be corrupted (set to laser_linear_range) |
-| visible_target | true | whether to show visual markers for the target positions that are visible to the agent |
-| visible_path | true | whether to show visual markers for the computed shortest path that are visible to the agent |
+| visual_object_at_initial_target_pos | true | whether to show visual markers for the initial and target positions |
+| target_visual_object_visible_to_agent | false | whether these visual markers are visible to the agents |
### Examples
@@ -193,48 +194,29 @@ In this example, we show how to instantiate `iGibsonEnv` and how to step through
- `reward`: a scalar that represents the current reward
- `done`: a boolean that indicates whether the episode should terminate
- `info`: a python dictionary for bookkeeping purpose
-The code can be found here: [igibson/examples/environments/env_nonint_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/environments/env_nonint_example.py).
-
-If the execution fails with segfault 11, you may need to reduce texture scaling in the config file (igibson/configs/turtlebot_static_nav.yaml) to avoid out-of-memory error.
+The code can be found here: [igibson/examples/demo/env_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/env_example.py).
```python
-import logging
-import os
-from sys import platform
-
-import yaml
-
-import igibson
from igibson.envs.igibson_env import iGibsonEnv
+from time import time
+import igibson
+import os
from igibson.render.profiler import Profiler
-from igibson.utils.assets_utils import download_assets, download_demo_data
+import logging
def main():
- """
- Creates an iGibson environment from a config file with a turtlebot in Rs (not interactive).
- It steps the environment 100 times with random actions sampled from the action space,
- using the Gym interface, resetting it 10 times.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- # If they have not been downloaded before, download assets and Rs Gibson (non-interactive) models
- download_assets()
- download_demo_data()
- config_filename = os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- env = iGibsonEnv(config_file=config_data, mode="gui_interactive")
+ config_filename = os.path.join(igibson.example_config_path, 'turtlebot_demo.yaml')
+ env = iGibsonEnv(config_file=config_filename, mode='gui')
for j in range(10):
- logging.info("Resetting environment")
env.reset()
for i in range(100):
- with Profiler("Environment action step"):
+ with Profiler('Environment action step'):
action = env.action_space.sample()
state, reward, done, info = env.step(action)
if done:
- logging.info("Episode finished after {} timesteps".format(i + 1))
+ logging.info(
+ "Episode finished after {} timesteps".format(i + 1))
break
env.close()
@@ -244,4 +226,4 @@ if __name__ == "__main__":
```
#### Interactive Environments
-In this example, we show how to instantiate `iGibsobEnv` with a fully interactive scene `Rs_int`. In this scene, the robot can interact with all the objects in the scene (chairs, tables, couches, etc). The code can be found here: [igibson/examples/environments/env_int_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/env_int_example.py).
+In this example, we show how to instantiate `iGibsonEnv` with a fully interactive scene `Rs_int`. In this scene, the robot can interact with all the objects in the scene (chairs, tables, couches, etc). The code can be found here: [igibson/examples/demo/env_interactive_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/env_interactive_example.py).
\ No newline at end of file
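Both example scripts referenced in this hunk can be run directly once the required data is present; a minimal sketch, assuming the commands are issued from the repository root after `pip install -e .`:

```bash
# Fetch the assets and the demo (static) scene, then run the static example.
python -m igibson.utils.assets_utils --download_assets
python -m igibson.utils.assets_utils --download_demo_data
python igibson/examples/demo/env_example.py

# The interactive example additionally needs the interactive scene dataset (Rs_int).
python -m igibson.utils.assets_utils --download_ig_dataset
python igibson/examples/demo/env_interactive_example.py
```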
diff --git a/docs/examples.md b/docs/examples.md
deleted file mode 100644
index 5d67aa725..000000000
--- a/docs/examples.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Code Examples
-
-In this section, we include a description of most of the code examples you can find under `igibson/examples/`.
-There may be new examples since we wrote this, so check that folder!
-
-If you are interested in just getting started as an end-user, you only need check out `igibson/examples/environments`.
-
-If you are looking for examples of BEHAVIOR, the benchmark of household activities that uses iGibson, please check the BEHAVIOR repository at https://github.com/StanfordVL/behavior.
-
-- environments: how to instantiate iGibson environments with interactive or static scenes, optionally with a scene selector.
-- learning: how to train RL policies for robot navigation using stable baselines 3, and how to save and replay demos of agents for imitation learning.
-- objects: how to create, load, and place objects to predefined locations or using a logic sampler (e.g. onTop(A, B)), how to change texture as a function of the temperature, and how to generate the minimum volume bounding boxes of objects.
-- object_states: how to change various objects states, including dusty, stained, (water sources) toggled on, (cleaning tool) soaked, sliced, and temprature, and how to save and reload object states.
-- observations: how to generate different observation modalities such as RGB, depth, LiDAR, segmentation, etc.
-- renderer: how to use the renderer directly, without the physics engine.
-- robots: how to (keyboard) control robots with differential drive controllers, IK controllers and sampling-based motion planners.
-- ros: how to run ROS with iGibson as if it is the real world.
-- scenes: how to load interactive and non-interactive scenes, how to use domain randomization (of object models and/or texture), and how to create a tour video of the scenes.
-- vr: how to use iGibson with VR.
-- web_ui: how to start a web server that hosts iGibson environments.
-
diff --git a/docs/extended_states.md b/docs/extended_states.md
deleted file mode 100644
index c6c89cb5a..000000000
--- a/docs/extended_states.md
+++ /dev/null
@@ -1,82 +0,0 @@
-# Extended States and Logic States
-
-### Extended States
-
-iGibson 2.0 is an object-oriented simulator: the simulator maintains and updates a list of objects with properties that change over-time.
-Object classes are organized following the WordNet hierarchy.
-Different to other simulators, iGibson 2.0 is not limited to kinematic properties of the objects (pose, velocity, acceleration, joint configurations for articulated objects), but it includes also a set of additional object-properties that we call "extended states".
-The extended states are:
-
-#### Temperature
-
-Real continuous value per object. It can change if the object is close/inside a source or sink of heat, and the source/sink is active.
-![extstates1.jpg](images/extstates1.jpg)
-
-#### Wetness level
-
-Integer value per object. It changes when in contact with a water dropplet.
-![extstates2.jpg](images/extstates2.jpg)
-
-#### Cleanliness (Dustiness and Stain Level)
-
-In iGibson 2.0, objects can be initialized with visible dust or stain particles on its surface. The number of particles at initialization corresponds to a 100% level of dustiness, d, or stains, s, as we assume that dust/stain particles cannot be generated after initialization. As particles are cleaned, the level decreases proportionally to the number of particles removed, reaching a level of 0% when all particles have been cleaned.
-![extstates3.jpg](images/extstates3.jpg)
-
-#### Toggled State
-Some object categories in iGibson 2.0 can be toggled on and off. iGibson 2.0 maintains and updates an internal binary functional state for those objects.
-![extstates4a.jpg](images/extstates4a.jpg)
-
-#### Sliced State
-Many cooking activities require the agent to slice objects, e.g. food items.
-In iGibson 2.0 some objects can be sliced into two halves and the state of the object will be updated.
-![extstates4b.jpg](images/extstates4b.jpg)
-
-
-## Logic States
-
-Based on the kinematic and extended states, iGibson 2.0 implements a list of logic predicates, checking conditions to indicate if a logic state is True or False.
-The logic states implemented and checked by iGibson 2.0 include:
-- InsideOf(o1,o2): Object o1 is inside of object o2 if we can find two orthogonal axes crossing at o1 center of mass that intersect o2 collision mesh in both directions.
-- OnTopOf(o1,o2): Object o1 is on top of object o2 if o2 is in the list of objects InSameNegativeVerticalAxisObjs(o1) and o2 is in the list of objects InSamePositiveVerticalAxisObjs(o1) and InContactWith(o1, o2), where InSamePositive/NegativeVerticalAxisObjs(o1) is the list of objects in the same positive/negative vertical axis as o1 and InContactWith(o1, o2) is whether the two objects are in physical contact.
-- NextTo(o1,o2): Object o1 is next to object o2 if o2 is in the list of objects InSameHorizontalPlaneObjs(o1) and l2(o1, o2) < tNextTo , where InSameHorizontalPlaneObjs(o1) is a list of objects in the same horizontal plane as o1, l2 is the L2 distance between the closest points of the two objects, and tNextTo is a distance threshold that is proportional to the average size of the two objects.
-- InContactWith(o1,o2): Object o1 is in contact with o2 if their surfaces are in contact in at least one point, i.e., o2 is in the list of objects InContactObjs(o1).
-- Under(o1,o2): Object o1 is under object o2 if o2 is in the list of objects InSamePositiveVerticalAxisObjs(o1) and o2 6 is in the list of objects InSameNegativeVerticalAxisObjs(o1).
-- OnFloor(o1,o2): Object o1 is on the room floor o2 if InContactWith(o1, o2) and o2 is of Room type.
-- Open(o): Any joints (internal articulated degrees of freedom) of object o are open. Only joints that are relevant to consider an object Open are used in the predicate computation, e.g. the door of a microwave but not the buttons and controls. To select the relevant joints, object models of categories that can be Open undergo an additional annotation that produces a RelevantJoints list. A joint is considered open if its joint state q is 5% over the lower limit, i.e. q > 0.05(qUpperLimit − qLowerLimit ) + qLowerLimit.
-- Cooked(o): The temperature of object o was over the cooked threshold, Tcooked , and under the burnt threshold, Tburnt, at least once in the history of the simulation episode, i.e., Tcooked ≤ T max o < Tburnt. We annotate the cooked temperature Tcooked for each object category that can be Cooked.
-- Burnt(o): The temperature of object o was over the burnt threshold, Tburnt, at least once in the history of the simulation episode, i.e., T max o ≥ Tburnt. We annotate the burnt temperature Tburnt for each object category that can be Burnt.
-- Frozen(o): The temperature of object o is under the freezing threshold, Tfrozen , i.e., To ≤ Tfrozen . We assume as default freezing temperature Tfrozen = 0◦C, a value that can be adapted per object category and model.
-- Soaked(o): The wetness level w of the object o is over a threshold, wsoaked , i.e., w ≥ wsoaked . The default value for the threshold is wsoaked = 1, (the object is soaked if it absorbs one or more droplets), a value that can be adapted per object category and model.
-- Dusty(o): The dustiness level d of the object o is over a threshold, ddusty , i.e., d > ddusty . The default value for the threshold is ddusty = 0.5, (half of the dust particles have been cleaned), a value that can be adapted per object category and model.
-- Stained(o): The stain level s of the object o is over a threshold, sstained , i.e., s > sstained . The default value for the threshold is sstained = 0.5, (half of the stain particles have been cleaned), a value that can be adapted per object category and model.
-- ToggledOn(o): Object o is toggled on or off. It is a direct query of the iGibson 2.0 objects’ extended state TS, the toggled state.
-- Sliced(o): Object o is sliced or not. It is a direct access of the iGibson 2.0 objects’ extended state SS, the sliced state.
-- InFoVOfAgent(o): Object o is in the field of view of the agent, i.e., at least one pixel of the image acquired by the agent’s onboard sensors corresponds to the surface of o.
-- InHandOfAgent(o): Object o is grasped by the agent’s hands (i.e. assistive grasping is activated on that object).
-- InReachOfAgent(o): Object o is within dreach = 2 meters of the agent.
-- InSameRoomAsAgent(o): Object o is located in the same room as the agent.
-
-
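To make the threshold-style predicates above concrete, here is a minimal, self-contained Python sketch of the Open and Cooked checks as they are described in this list. This is not the iGibson API: the class names, attributes, and threshold values below are hypothetical and only illustrate the formulas.

```python
from dataclasses import dataclass, field
from typing import List

# Illustrative sketch only (not the iGibson API); names and thresholds are hypothetical.

@dataclass
class JointState:
    q: float        # current joint position
    q_lower: float  # lower joint limit
    q_upper: float  # upper joint limit

@dataclass
class ObjectState:
    relevant_joints: List[JointState] = field(default_factory=list)
    max_temperature: float = float("-inf")  # highest temperature reached in the episode
    t_cooked: float = 70.0                  # per-category cooked threshold (assumed value)
    t_burnt: float = 200.0                  # per-category burnt threshold (assumed value)

def is_open(obj: ObjectState) -> bool:
    # A joint counts as open if it is more than 5% above its lower limit.
    return any(
        j.q > 0.05 * (j.q_upper - j.q_lower) + j.q_lower
        for j in obj.relevant_joints
    )

def is_cooked(obj: ObjectState) -> bool:
    # Cooked if the object ever reached t_cooked but never reached t_burnt.
    return obj.t_cooked <= obj.max_temperature < obj.t_burnt

# Example: a microwave door rotated to 30% of its range counts as Open.
door = JointState(q=0.3, q_lower=0.0, q_upper=1.0)
print(is_open(ObjectState(relevant_joints=[door])))    # True
print(is_cooked(ObjectState(max_temperature=120.0)))   # True
```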
-We do not annotate every object with every extended state.
-Objects are annotated with the extended states that allow meaningful and useful logic states to be queried from them, e.g., temperature to query whether food is cooked (or frozen or burnt).
-This doesn't mean that a table does not have a temperature, but that its temperature is not relevant for most robotics activities simulated in iGibson 2.0.
-We annotate object classes with meaningful logic states that apply to them, and, possibly, the parameters to check these states.
-The list of classes and states can be found in the BDDL repository ([list of object characteristics](https://github.com/sanjanasrivastava/BDDL/blob/master/utils/synsets_to_filtered_properties_pruned_igibson.json)).
-
-The characteristics that can be annotated are listed below, with the additional extended states and parameters in parentheses. Only relevant characteristics are annotated
-(e.g., the temperature is not necessary/relevant for non-food categories for most tasks of interest). A toy sketch of such a category-to-characteristics mapping is given after the two lists:
-- cookable: Can be Cooked (MaxTemperature, Temperature)
-- burnable: Can be Burnt (MaxTemperature, Temperature)
-- freezable: Can be Frozen (Temperature)
-- soakable: Can be Soaked (WetnessLevel)
-- dustyable: Can be Dusty (DustinessLevel)
-- stainable: Can be Stained (StainLevel)
-- toggleable: Can be ToggledOn (ToggledState)
-- sliceable: Can be Sliced (SlicedState)
-
-Additionally, some objects have characteristics that help change the logic states of others (additional extended state in parentheses):
-- Is a HeatSourceSink (ToggledState)
-- Is a DropletSource (ToggledState)
-- Is a CleaningTool
-- Is a Liquid
-- Is a Slicer
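As a toy illustration of how these per-category annotations determine which logic states can be queried, consider the following sketch. The real mapping lives in the BDDL JSON file linked above; the dictionary structure, category names, and groupings below are made up for illustration only.

```python
# Hypothetical illustration only: the real annotations live in the BDDL JSON
# linked above and use their own structure, keys, and category names.
CATEGORY_CHARACTERISTICS = {
    "apple":     {"cookable", "burnable", "freezable", "sliceable"},
    "towel":     {"soakable", "stainable", "dustyable"},
    "microwave": {"toggleable"},
    "table":     {"dustyable", "stainable"},
}

# Each characteristic enables one logic state (and its extended states/parameters).
CHARACTERISTIC_TO_STATE = {
    "cookable": "Cooked",
    "burnable": "Burnt",
    "freezable": "Frozen",
    "soakable": "Soaked",
    "dustyable": "Dusty",
    "stainable": "Stained",
    "toggleable": "ToggledOn",
    "sliceable": "Sliced",
}

def supported_states(category: str) -> set:
    """Logic states that are meaningful to query for a given object category."""
    return {CHARACTERISTIC_TO_STATE[c] for c in CATEGORY_CHARACTERISTICS.get(category, set())}

print(supported_states("apple"))  # e.g. {'Cooked', 'Burnt', 'Frozen', 'Sliced'} (set order varies)
print(supported_states("table"))  # e.g. {'Dusty', 'Stained'}
```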
diff --git a/docs/images/extstates1.jpg b/docs/images/extstates1.jpg
deleted file mode 100644
index a73fca670..000000000
Binary files a/docs/images/extstates1.jpg and /dev/null differ
diff --git a/docs/images/extstates2.jpg b/docs/images/extstates2.jpg
deleted file mode 100644
index 11defc46e..000000000
Binary files a/docs/images/extstates2.jpg and /dev/null differ
diff --git a/docs/images/extstates3.jpg b/docs/images/extstates3.jpg
deleted file mode 100644
index e3b44afe9..000000000
Binary files a/docs/images/extstates3.jpg and /dev/null differ
diff --git a/docs/images/extstates4.jpg b/docs/images/extstates4.jpg
deleted file mode 100644
index 9b921b4f9..000000000
Binary files a/docs/images/extstates4.jpg and /dev/null differ
diff --git a/docs/images/extstates4a.jpg b/docs/images/extstates4a.jpg
deleted file mode 100644
index fcd409eb6..000000000
Binary files a/docs/images/extstates4a.jpg and /dev/null differ
diff --git a/docs/images/extstates4b.jpg b/docs/images/extstates4b.jpg
deleted file mode 100644
index d98e35312..000000000
Binary files a/docs/images/extstates4b.jpg and /dev/null differ
diff --git a/docs/images/object.png b/docs/images/object.png
new file mode 100644
index 000000000..e8ed128a8
Binary files /dev/null and b/docs/images/object.png differ
diff --git a/docs/images/sampling2.gif b/docs/images/sampling2.gif
deleted file mode 100644
index 79a3b2f11..000000000
Binary files a/docs/images/sampling2.gif and /dev/null differ
diff --git a/docs/index.rst b/docs/index.rst
index f26d580ed..19e43f2c5 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -21,20 +21,17 @@ Welcome to iGibson's documentation!
dataset.md
assets.md
- simulators.md
- physics_engine.md
- extended_states.md
renderer.md
- viewer.md
- environments.md
+ physics_engine.md
scenes.md
objects.md
robots.md
- sampling.md
+ simulators.md
+ viewer.md
+ environments.md
learning_framework.md
ros_integration.md
tests.md
- examples.md
.. toctree::
:maxdepth: 1
diff --git a/docs/installation.md b/docs/installation.md
index b0d963b40..72bd7cb6e 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -1,9 +1,7 @@
# Installation
There are two steps to install iGibson, the Interactive Gibson Environment, on your computer.
-First, you need to install the simulation environment. This may require installing additional dependencies. Then, you need to download the assets: models of the robotic agents, the interactive objects and 3D reconstructed real-world large environments for your agents to train.
-
-## Installing the Environment
+First, you need to install the simulation environment. Then, you need to download the assets: models of the robotic agents, the interactive objects and 3D reconstructed real-world large environments for your agents to train.
### System Requirements
@@ -31,35 +29,9 @@ The minimum system requirements are the following:
Other system configurations may work, but we haven't tested them extensively and we probably won't be able to provide as much support as we want.
-### Installing dependencies in Linux machines
-
-As with most Python packages, we recommend installing iGibson in a virtual environment.
-We suggest using Conda instead of a standard virtual environment.
-To set up anaconda with the right dependencies, run the following as your user account (**not as root/superuser**):
-
-```bash
-# Install miniconda
-curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh
-bash Miniconda-latest-Linux-x86_64.sh
-rm Miniconda-latest-Linux-x86_64.sh
-
-# Add conda to your PATH
-echo "export PATH=$HOME/.miniconda/bin:$PATH" >> .bashrc
-
-# Update conda and create a virtual environment for iGibson
-conda update -y conda
-conda create -y -n igibson python=3.8
-conda activate igibson
-```
-
-
-
Careful, this may change your GPU drivers!
-There are several system dependencies to correctly run iGibson on Linux, mostly related to Nvidia drivers and Cuda.
-In case your system is a clean Ubuntu 20.04, you can run the following commands as root/superuser to install all required dependencies:
+## Installing dependencies
-
- Click to expand the code to install the dependencies including Nvidia drivers in headless mode to use for example in a cluster:
-
+Beginning with a clean Ubuntu 20.04 installation, you **must run the following script as root/superuser** (`sudo su`), which will install all needed dependencies to build and run iGibson with CUDA 11.1:
```bash
# Add the nvidia ubuntu repositories
@@ -70,6 +42,8 @@ apt-get update && apt-get install -y --no-install-recommends \
echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list
# The following cuda libraries are required to compile igibson
+# Note: the following assumes you will be using nvidia drivers on a headless node
+# please use xserver-xorg-video-nvidia-470 if you are on a desktop
apt-get update && apt-get update && apt-get install -y --no-install-recommends \
nvidia-headless-470 \
cuda-cudart-11-1=11.1.74-1 \
@@ -84,41 +58,24 @@ apt-get update && apt-get install -y --no-install-recommends \
g++ \
libegl-dev
```
-
-
-
- Click to expand the code to install the dependencies including Nvidia drivers to render on screen for example on a desktop computer:
-
+
+Conda is recommended over standard virtual environments. To set up anaconda with the requisite dependencies, run the following as your user account (**not as root/superuser**):
```bash
-# Add the nvidia ubuntu repositories
-apt-get update && apt-get install -y --no-install-recommends \
- gnupg2 curl ca-certificates && \
- curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub | apt-key add - && \
- echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \
- echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list
+# Install miniconda
+curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh
+bash Miniconda-latest-Linux-x86_64.sh
+rm Miniconda-latest-Linux-x86_64.sh
-# The following cuda libraries are required to compile igibson
-apt-get update && apt-get update && apt-get install -y --no-install-recommends \
- xserver-xorg-video-nvidia-470 \
- cuda-cudart-11-1=11.1.74-1 \
- cuda-compat-11-1 \
- cuda-command-line-tools-11-1=11.1.1-1 \
- cuda-libraries-dev-11-1=11.1.1-1 \
+# Add conda to your PATH
+echo "export PATH=$HOME/.miniconda/bin:$PATH" >> .bashrc
-# For building and running igibson
-apt-get update && apt-get install -y --no-install-recommends \
- cmake \
- git \
- g++ \
- libegl-dev
+# Update conda and create a virtual environment for iGibson
+conda update -y conda
+conda create -y -n igibson python=3.8
+conda activate igibson
```
-
-
-
-
-
By default, iGibson builds with CUDA support which requires that `nvcc` is on your path (or CUDA 11 is symlinked to `/usr/local/cuda` from `/usr/local/cuda-11.1`). Cmake uses `nvcc` to find the CUDA libraries and headers when building iGibson. Add the following to your shell rc (`.bashrc`, `.zshrc`, etc.) and re-login to your shell (`exec bash`, `exec zsh`, etc.):
```bash
export PATH=/usr/local/cuda-11.1/bin:$PATH
@@ -134,23 +91,23 @@ Cuda compilation tools, release 11.1, V11.1.105
Build cuda_11.1.TC455_06.29190527_0
```
-In case you want to build without CUDA support (used for the "rendering to GPU tensor" feature), you will have to set `USE_CUDA` to `False` in `iGibson/igibson/render/CMakeLists.txt`.
+To build without CUDA support (used for the "rendering to GPU tensor" feature), you will have to set `USE_CUDA` to `False` in `iGibson/igibson/render/CMakeLists.txt`.
-### Installing iGibson
+## Installing the Environment
We provide 3 methods to install the simulator.
-#### 1. pip
+### 1. pip
iGibson's simulator can be installed as a python package using pip:
```bash
pip install igibson # This step takes about 4 minutes
# run the demo
-python -m igibson.examples.environments.env_nonint_example
+python -m igibson.examples.demo.demo_static
```
-#### 2. Docker image
+### 2. Docker image
Docker provides an easy way to reproduce the development environment across platforms without manually installing the software dependencies. We have prepared docker images that contain everything you need to get started with iGibson.
@@ -179,7 +136,7 @@ cd iGibson/docker/headless-gui
```
-#### 3. Compile from source
+### 3. Compile from source
Alternatively, iGibson can be compiled from source: [iGibson GitHub Repo](https://github.com/StanfordVL/iGibson). First, you need to install anaconda following the guide on [their website](https://www.anaconda.com/).
@@ -187,10 +144,8 @@ Alternatively, iGibson can be compiled from source: [iGibson GitHub Repo](https:
git clone https://github.com/StanfordVL/iGibson --recursive
cd iGibson
-# if you didn't create the conda environment before:
-conda create -y -n igibson python=3.8
-conda activate igibson
-
+conda create -n py3-igibson python=3.8 anaconda # we support python 2.7, 3.5, 3.6, 3.7, 3.8
+source activate py3-igibson
pip install -e . # This step takes about 4 minutes
```
@@ -198,20 +153,19 @@ We recommend the third method if you plan to modify iGibson in your project. If
Note: If you are not using conda, you will need the system packages python3-dev (header files to build Python extensions) and python3-opencv (provides opencv and its dependencies).
-### The SVL pybullet fork
-
-To optimize and accelerate physics simulation, we use a custom version of pybullet in iGibson. This is installed automatically if you install iGibson in a fresh conda environment, but if you already have a regular pybullet, you should manually remove it and install our fork as follows (otherwise your 'pip install -e .' will fail):
+## The SVL pybullet fork
+Note: we support using a custom pybullet version to speed up the physics in iGibson. This is installed automatically if you install iGibson. If you already have pybullet installed in your conda environment, you can replace it with our fork as follows:
```bash
pip uninstall pybullet
pip install pybullet-svl
```
-## Downloading the Assets and Datasets of Scenes and Objects
+## Downloading the Assets
-Once the environment has been installed, we need to download the assets to enable the simulation including models of the robotic agents, objects, 3D scenes, etc. This process requires three simple steps.
-
-First, we need to configure where the iGibson's assets are going to be stored. The desired path should be indicated in `your_installation_path/igibson/global_config.yaml`. The default place to store the data is:
+First, configure where iGibson's assets (robotic agents, objects, 3D environments, etc.) are going to be stored. This is configured in `your_installation_path/igibson/global_config.yaml`.
+
+To make things easier, the default place to store the data is:
```bash
assets_path: your_installation_path/igibson/data/assets
g_dataset_path: your_installation_path/igibson/data/g_dataset
@@ -220,50 +174,47 @@ threedfront_dataset_path: your_installation_path/igibson/data/threedfront_datase
cubicasa_dataset_path: your_installation_path/igibson/data/cubicasa_dataset
```
-In case you prefer to store the assets in a different location, you can run the command:
+If you are happy with the default path, you don't have to do anything; otherwise, you can run this script:
```bash
python -m igibson.utils.assets_utils --change_data_path
```
-Second, we need to download the robot models and some small objects from the assets bundle [here](https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz) and unpack it in the assets folder. More easily, this process can be automatically done by executing the command:
+Second, you can download our robot models and objects from [here](https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz) and unpack them in the assets folder, or simply run this download script:
```bash
python -m igibson.utils.assets_utils --download_assets
```
-Third, we need to download datasets of scenes (Gibson or iGibson), and, possibly, the BEHAVIOR Datasets of Object Models.
-For interactive tasks, you need to download iGibson 2.0 Scenes and the BEHAVIOR Dataset of Objects, or iGibson 1.0 Scenes. They include several fully interactive scenes and hundreds of 3D objects to use with our simulator.
-For navigation tasks, you could use the interactive scenes, but we also provide back-compatibility to the Gibson Dataset, Stanford 2D-3D-Semantics Dataset, and Matterport3D Dataset that include non-interactive scene models.
-Follow the detailed instructions [here](dataset.md) to download the aforementioned datasets.
-Alternatively, to avoid downloading an entire dataset before you can test iGibson's functionalities, we provide a single [high quality small non-interactive scene (R's)](https://storage.googleapis.com/gibson_scenes/Rs.tar.gz) for demo and testing purposes.
-To download this demo data, run:
+Third, you need to download some large 3D reconstructed real-world environments (e.g. houses and offices) from [our dataset](dataset.md) for your agents to be trained in. Create a new folder for those environments and set the path in `your_installation_path/igibson/global_config.yaml` (default and recommended: `your_installation_path/igibson/data/g_dataset` and `your_installation_path/igibson/data/ig_dataset`). You can get access and download the Gibson dataset (after filling out the following [license agreement](https://forms.gle/36TW9uVpjrE1Mkf9A)) and the iGibson dataset (following the guide [here](http://svl.stanford.edu/igibson/docs/dataset.html#download-instruction) or following the instructions below). In addition, you can download a single [high quality small environment R's](https://storage.googleapis.com/gibson_scenes/Rs.tar.gz) for demo purposes.
+
+To download the demo data, run:
```bash
python -m igibson.utils.assets_utils --download_demo_data
```
-## Examples
+The full Gibson and iGibson datasets can be downloaded using the following commands; these scripts automatically download, decompress, and put the datasets in the correct place. You will get the `URL` after filling in the agreement form.
-We provide multiple examples to get you started!
-Check the folder [igibson/examples](https://github.com/StanfordVL/iGibson/tree/master/igibson/examples) and the description [here](examples.md).
-
-After installing the code and downloading the demo data, you should be able to try out a simple robot navigation demo executing:
+Download iGibson dataset
+```bash
+python -m igibson.utils.assets_utils --download_ig_dataset
+```
+Download Gibson dataset ([agreement signing](https://forms.gle/36TW9uVpjrE1Mkf9A) required to get `URL`)
```bash
-python -m igibson.examples.environments.env_nonint_example
+python -m igibson.utils.assets_utils --download_dataset URL
```
-## Testing
+## Testing
-To test iGibson installation, you can run
+To test that iGibson is properly installed, you can run
```bash
python
>> import igibson
```
For a full suite of tests and benchmarks, you can refer to [tests](tests.md) for more details.
-(For Mac users) Some tests will fail as they require an Nvidia GPU.
## Uninstalling
Uninstalling iGibson is easy: `pip uninstall igibson`
diff --git a/docs/intro.md b/docs/intro.md
index 9b931978d..85cc9df72 100644
--- a/docs/intro.md
+++ b/docs/intro.md
@@ -2,24 +2,20 @@
### Large Scale Interactive Simulation Environments for Robot Learning
-iGibson, the Interactive Gibson Environment, is a simulation environment providing fast visual rendering and physics simulation (based on Bullet).
-It is packed with a dataset with hundreds of large 3D environments reconstructed from real homes and offices, and interactive objects that can be pushed and actuated.
-iGibson allows researchers to train and evaluate robotic agents that use RGB images and/or other visual sensors to solve indoor (interactive) navigation and mobile manipulation tasks such as opening doors, picking and placing objects, or searching for objects.
-With the latest extension, iGibson 2.0 supports new types of [object state changes](extended_states.md) (cook, soak, slice, freeze, etc), that can enable new types of simulated activities!
-iGibson implements all features required to evaluate AI solutions in the BEHAVIOR benchmark: [sampling logic activity descriptions](sampling.md), [checking logic states](extended_states.md), connecting to the BEHAVIOR dataset of 3D objects and evaluating BEHAVIOR metrics (information about the [dataset](https://stanfordvl.github.io/behavior/objects.html) and the [metrics](https://stanfordvl.github.io/behavior/metrics.html) can be found in the documentation of the BEHAVIOR repository).
+iGibson, the Interactive Gibson Environment, is a simulation environment providing fast visual rendering and physics simulation (based on Bullet). It is packed with a dataset with hundreds of large 3D environments reconstructed from real homes and offices, and interactive objects that can be pushed and actuated. iGibson allows researchers to train and evaluate robotic agents that use RGB images and/or other visual sensors to solve indoor (interactive) navigation and manipulation tasks such as opening doors, picking and placing objects, or searching in cabinets.
### Citation
-If you use iGibson or its assets and models, consider citing the following publications:
+If you use iGibson or its assets and models, consider citing the following publication:
```
-@inproceedings{shen2021igibson,
+@misc{shen2021igibson,
title={iGibson 1.0: a Simulation Environment for Interactive Tasks in Large Realistic Scenes},
- author={Bokui Shen and Fei Xia and Chengshu Li and Roberto Mart\'in-Mart\'in and Linxi Fan and Guanzhi Wang and Claudia Pérez-D'Arpino and Shyamal Buch and Sanjana Srivastava and Lyne P. Tchapmi and Micael E. Tchapmi and Kent Vainio and Josiah Wong and Li Fei-Fei and Silvio Savarese},
- booktitle={2021 IEEE/RSJ International Conference on Intelligent Robots and Systems},
+ author={Bokui Shen and Fei Xia and Chengshu Li and Roberto Martín-Martín and Linxi Fan and Guanzhi Wang and Claudia Pérez-D'Arpino and Shyamal Buch and Sanjana Srivastava and Lyne P. Tchapmi and Micael E. Tchapmi and Kent Vainio and Josiah Wong and Li Fei-Fei and Silvio Savarese},
year={2021},
- pages={accepted},
- organization={IEEE}
+ eprint={2012.02924},
+ archivePrefix={arXiv},
+ primaryClass={cs.AI}
}
```
@@ -35,9 +31,9 @@ If you use iGibson or its assets and models, consider citing the following publi
```
### Code Release
-The GitHub repository of iGibson can be found [here](https://github.com/StanfordVL/iGibson). Bug reports, suggestions for improvement, as well as community developments are encouraged and appreciated.
+The GitHub repository of iGibson can be found here: [iGibson GitHub Repo](https://github.com/StanfordVL/iGibson). Bug reports, suggestions for improvement, as well as community developments are encouraged and appreciated.
### Documentation
-The documentation for iGibson can be found [here](http://svl.stanford.edu/igibson/docs/). It includes installation guide (including data download instructions), quickstart guide, code examples, and APIs.
+The documentation for iGibson can be found here: [iGibson Documentation](http://svl.stanford.edu/igibson/docs/). It includes installation guide (including data download instructions), quickstart guide, code examples, and APIs.
-If you want to know more about iGibson, you can also check out [our webpage](http://svl.stanford.edu/igibson), the [iGibson 2.0 arxiv preprint](https://arxiv.org/abs/2108.03272) and the [iGibson 1.0 arxiv preprint](https://arxiv.org/abs/2012.02924).
+If you want to know more about iGibson, you can also check out [our webpage](http://svl.stanford.edu/igibson), [iGibson 2.0 arxiv preprint](https://arxiv.org/abs/2108.03272) and [iGibson 1.0 arxiv preprint](https://arxiv.org/abs/2012.02924).
diff --git a/docs/objects.md b/docs/objects.md
index cb91f4ff7..022aa7814 100644
--- a/docs/objects.md
+++ b/docs/objects.md
@@ -91,176 +91,55 @@ Instruction can be found here: [External Objects](https://github.com/StanfordVL/
### Examples
-In this example, we import a few objects into iGibson. The code can be found here: [igibson/examples/objects/load_objects.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/objects/load_objects.py).
+In this example, we import three objects into PyBullet, two of which are articulated objects. The code can be found here: [igibson/examples/demo/object_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/object_example.py).
```python
-import logging
-import os
-from sys import platform
-
-import numpy as np
-import yaml
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.objects.articulated_object import URDFObject
from igibson.objects.ycb_object import YCBObject
-from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
-from igibson.render.profiler import Profiler
-from igibson.robots.turtlebot import Turtlebot
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path
-from igibson.utils.utils import let_user_pick, parse_config
+from igibson.objects.articulated_object import ArticulatedObject
+import igibson
+import os
+import pybullet as p
+import pybullet_data
+import time
def main():
- """
- This demo shows how to load scaled objects from the iG object model dataset and
- additional objects from the YCB dataset in predefined locations
- Loads a concrete object model of a table, and a random one of the same category, and 10 cracker boxes
- The objects can be loaded into an empty scene, an interactive scene (iG) or a static scene (Gibson)
- The example also shows how to use the Environment API or directly the Simulator API, loading objects and robots
- and executing actions
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- scene_options = ["Empty scene", "Interactive scene (iG)", "Static scene (Gibson)"]
- type_of_scene = let_user_pick(scene_options) - 1
-
- if type_of_scene == 0: # Empty
- config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml"))
- settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- # scene.load_object_categories(benchmark_names)
- s.import_scene(scene)
- robot_config = config["robot"]
- robot_config.pop("name")
- turtlebot = Turtlebot(**robot_config)
- s.import_robot(turtlebot)
-
- elif type_of_scene == 1: # iG
- config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml")
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building
- config_data["visible_target"] = False
- config_data["visible_path"] = False
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- env = iGibsonEnv(config_file=config_data, mode="gui_interactive")
- s = env.simulator
+ p.connect(p.GUI)
+ p.setGravity(0, 0, -9.8)
+ p.setTimeStep(1./240.)
- elif type_of_scene == 2: # Gibson
- config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml"))
- settings = MeshRendererSettings(enable_shadow=False, msaa=False)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- settings.texture_scale = 0.5
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
+ floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
+ p.loadMJCF(floor)
- scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=False)
- s.import_scene(scene)
- robot_config = config["robot"]
- robot_config.pop("name")
- turtlebot = Turtlebot(**robot_config)
- s.import_robot(turtlebot)
+ cabinet_0007 = os.path.join(
+ igibson.assets_path, 'models/cabinet2/cabinet_0007.urdf')
+ cabinet_0004 = os.path.join(
+ igibson.assets_path, 'models/cabinet/cabinet_0004.urdf')
- # Set a better viewing direction
- s.viewer.initial_pos = [-2, 1.4, 1.2]
- s.viewer.initial_view_direction = [0.6, -0.8, 0.1]
- s.viewer.reset_viewer()
+ obj1 = ArticulatedObject(filename=cabinet_0007)
+ obj1.load()
+ obj1.set_position([0, 0, 0.5])
- # Objects to load: two tables, the first one is predefined model, the second, random for the same category
- table_objects_to_load = {
- "table_1": {
- "category": "breakfast_table",
- "model": "1b4e6f9dd22a8c628ef9d976af675b86",
- "pos": (0.0, -0.2, 1.01),
- "orn": (0, 0, 90),
- },
- "table_2": {
- "category": "breakfast_table",
- "pos": (0.5, -2.0, 1.01),
- "orn": (0, 0, 45),
- },
- }
+ obj2 = ArticulatedObject(filename=cabinet_0004)
+ obj2.load()
+ obj2.set_position([0, 0, 2])
- # Load the specs of the object categories, e.g., common scaling factor
- avg_category_spec = get_ig_avg_category_specs()
+ obj3 = YCBObject('003_cracker_box')
+ obj3.load()
+ obj3.set_position_orientation([0, 0, 1.2], [0, 0, 0, 1])
- scene_objects = {}
- try:
- for obj in table_objects_to_load.values():
- category = obj["category"]
- if category in scene_objects:
- scene_objects[category] += 1
- else:
- scene_objects[category] = 1
+ for _ in range(24000): # at least 100 seconds
+ p.stepSimulation()
+ time.sleep(1./240.)
- # Get the path for all models of this category
- category_path = get_ig_category_path(category)
+ p.disconnect()
- # If the specific model is given, we use it. If not, we select one randomly
- if "model" in obj:
- model = obj["model"]
- else:
- model = np.random.choice(os.listdir(category_path))
- # Create the full path combining the path for all models and the name of the model
- model_path = get_ig_model_path(category, model)
- filename = os.path.join(model_path, model + ".urdf")
-
- # Create a unique name for the object instance
- obj_name = "{}_{}".format(category, scene_objects[category])
-
- # Create and import the object
- simulator_obj = URDFObject(
- filename,
- name=obj_name,
- category=category,
- model_path=model_path,
- avg_obj_dims=avg_category_spec.get(category),
- fit_avg_dim_volume=True,
- texture_randomization=False,
- overwrite_inertial=True,
- initial_pos=obj["pos"],
- initial_orn=obj["orn"],
- )
- s.import_object(simulator_obj)
-
- for _ in range(10):
- obj = YCBObject("003_cracker_box")
- s.import_object(obj)
- obj.set_position_orientation(np.append(np.random.uniform(low=0, high=2, size=2), [1.8]), [0, 0, 0, 1])
-
- if type_of_scene == 1:
- for j in range(10):
- logging.info("Resetting environment")
- env.reset()
- for i in range(100):
- with Profiler("Environment action step"):
- # action = env.action_space.sample()
- state, reward, done, info = env.step([0.1, 0.1])
- if done:
- logging.info("Episode finished after {} timesteps".format(i + 1))
- break
- else:
- for i in range(10000):
- with Profiler("Simulator step"):
- turtlebot.apply_action([0.1, 0.1])
- s.step()
- rgb = s.renderer.render_robot_cameras(modes=("rgb"))
-
- finally:
- if type_of_scene == 1:
- env.close()
- else:
- s.disconnect()
+if __name__ == '__main__':
+ main()
+```
-if __name__ == "__main__":
- main()
+You can open the cabinet and the drawer by dragging your mouse over them. You can even put the cracker box into the drawer like this:
+![object](images/object.png)
-```
\ No newline at end of file
diff --git a/docs/overview.md b/docs/overview.md
index 182921208..96a5ea4c2 100644
--- a/docs/overview.md
+++ b/docs/overview.md
@@ -1,38 +1,16 @@
-# Overview of Modules
+# Overview
Next, we will give an overview of iGibson and briefly explain the different modules in our system.
![quickstart.png](images/overview.png)
-First of all, we have **Datasets** and **Assets**.
-**Datasets** include the 3D reconstructed real-world environments (iGibson Dataset), and 3D models of objects (BEHAVIOR Dataset of Objects).
-**Assets** contain models of robots and some additional 3D objects.
-The download guide for the assets can be found [here](installation.html#downloading-the-assets).
-More info can be found in the sections [Datasets](dataset.md) and [Assets](assets.md).
+First of all, we have **Dataset** and **Assets**. **Dataset** contains 3D reconstructed real-world environments. **Assets** contain models of robots and objects. The download guide can be found [here](installation.html#downloading-the-assets). More info can be found here: [Dataset](dataset.md) and [Assets](assets.md).
-Next, we have **Renderer** and **PhysicsEngine**.
-These are the two pillars that ensure the visual and physics fidelity of iGibson.
-We developed our own renderer that supports customizable camera configuration, physics-based rendering (PBR) and various image modalities, and renders at lightning speed.
-We use the open-sourced [PyBullet](http://www.pybullet.org/) as our underlying physics engine.
-It can simulate rigid body collision and joint actuation for robots and articulated objects in an accurate and efficient manner.
-Since we are using MeshRenderer for rendering and PyBullet for physics simulation, we need to keep them synchronized at all times.
-Our code has already handled this for you. More info can be found here: [Renderer](renderer.md) and [PhysicsEngine](physics_engine.md).
+Next, we have **Renderer** and **PhysicsEngine**. These are the two pillars that ensure the visual and physics fidelity of iGibson. We developed our own MeshRenderer that supports customizable camera configuration, physics-based rendering (PBR) and various image modalities, and renders at lightning speed. We use the open-sourced [PyBullet](http://www.pybullet.org/) as our underlying physics engine. It can simulate rigid body collision and joint actuation for robots and articulated objects in an accurate and efficient manner. Since we are using MeshRenderer for rendering and PyBullet for physics simulation, we need to keep them synchronized at all times. Our code has already handled this for you. More info can be found here: [Renderer](renderer.md) and [PhysicsEngine](physics_engine.md).
-Furthermore, we have **Scene**, **Object**, **Robot**, and **Simulator**.
-**Scene** loads 3D scene meshes from `igibson.g_dataset_path, igibson.ig_dataset_path` and loads/holds the list of objects associated with an interactive scene.
-**Object** loads interactable objects from `igibson.assets_path`.
-**Robot** loads robots from `igibson.assets_path`.
-**Simulator** maintains an instance of **Renderer** and **PhysicsEngine** and provides APIs to import **Scene**, **Object** and **Robot** into both of them and keep them synchronized at all times.
-More info can be found here: [Scene](./scenes.md), [Object](./objects.md), [Robot](./robots.md), and [Simulator](simulators.md).
+Furthermore, we have **Scene**, **Object**, **Robot**, and **Simulator**. **Scene** loads 3D scene meshes from `igibson.g_dataset_path, igibson.ig_dataset_path`. **Object** loads interactable objects from `igibson.assets_path`. **Robot** loads robots from `igibson.assets_path`. **Simulator** maintains an instance of **Renderer** and **PhysicsEngine** and provides APIs to import **Scene**, **Object** and **Robot** into both of them and keep them synchronized at all times. More info can be found here: [Scene](./scenes.md), [Object](./objects.md), [Robot](./robots.md), and [Simulator](simulators.md).
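For orientation, here is a rough sketch of how these pieces fit together, assembled from the examples referenced in these docs; module paths and constructor arguments differ between iGibson versions, so treat it as illustrative rather than canonical.

```python
import os

import igibson
from igibson.objects.ycb_object import YCBObject
from igibson.robots.turtlebot import Turtlebot
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.simulator import Simulator
from igibson.utils.utils import parse_config

config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml"))

# Simulator wraps the renderer and the PyBullet physics engine
s = Simulator(mode="headless", image_width=512, image_height=512)  # or "gui_interactive" for a viewer

# Scene: 3D meshes loaded from igibson.g_dataset_path / igibson.ig_dataset_path
scene = StaticIndoorScene("Rs", build_graph=True)
s.import_scene(scene)

# Robot: loaded from igibson.assets_path
robot_config = config["robot"]
robot_config.pop("name")
turtlebot = Turtlebot(**robot_config)
s.import_robot(turtlebot)

# Object: an interactable object
obj = YCBObject("003_cracker_box")
s.import_object(obj)
obj.set_position_orientation([0.5, 0.5, 1.2], [0, 0, 0, 1])

# Each step advances physics and keeps the renderer in sync
for _ in range(100):
    s.step()
s.disconnect()
```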
-Moreover, we have **Task**, **Sensor** and **Environment**.
-**Task** defines the task setup and includes a list of **Reward Function** and **Termination Condition**.
-It also provides task-specific reset functions and task-relevant observation definition.
-**Sensor** provides a light wrapper around **Render** to retrieve sensory observation.
-**Environment** follows the [OpenAI gym](https://github.com/openai/gym) convention and provides an API interface for external applications.
-More info can be found here: [Environment](environments.md).
+Moreover, we have **Task**, **Sensor** and **Environment**. **Task** defines the task setup and includes a list of **Reward Function** and **Termination Condition**. It also provides task-specific reset functions and task-relevant observation definition. **Sensor** provides a light wrapper around **Render** to retrieve sensory observation. **Environment** follows the [OpenAI gym](https://github.com/openai/gym) convention and provides an API interface for external applications. More info can be found here: [Environment](environments.md).
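Since **Environment** follows the OpenAI gym convention, a typical interaction loop looks like the following sketch (based on the environment examples in these docs; the config file and mode are placeholders and may differ between versions).

```python
import os

import igibson
from igibson.envs.igibson_env import iGibsonEnv

config_file = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml")
env = iGibsonEnv(config_file=config_file, mode="headless")  # "gui_interactive" opens a viewer

state = env.reset()
for _ in range(100):
    action = env.action_space.sample()            # any gym-compatible policy could go here
    state, reward, done, info = env.step(action)
    if done:
        state = env.reset()
env.close()
```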
-Finally, any learning framework (e.g. RL, IL) or planning and control framework (e.g. ROS) can be used with **Environment** as long as they accommodate the OpenAI gym interface.
-We provide tight integration with **ROS** that allows for evaluation and visualization of, say, ROS Navigation Stack, in iGibson.
-More info can be found here: [Learning Framework](learning_framework.md) and [ROS](ros_integration.md).
+Finally, any learning framework (e.g. RL, IL) or planning and control framework (e.g. ROS) can be used with **Environment** as long as they accommodate the OpenAI gym interface. We provide tight integration with **ROS** that allows for evaluation and visualization of, say, ROS Navigation Stack, in iGibson. More info can be found here: [Learning Framework](learning_framework.md) and [ROS](ros_integration.md).
We highly recommend you go through each of the Modules below for more details and code examples.
diff --git a/docs/physics_engine.md b/docs/physics_engine.md
index 7e444bf3a..43b1dc8c9 100644
--- a/docs/physics_engine.md
+++ b/docs/physics_engine.md
@@ -8,7 +8,7 @@ Typically, we use `p.createMultiBody` and `p.loadURDF` to load scenes, objects a
More info can be found in here: [PyBullet documentation](https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA).
### Examples
-In this example, we import a scene, a robot and an object into PyBullet and step through a few seconds of simulation.
+In this example, we import a scene, a robot and an object into PyBullet and step through a few seconds of simulation. The code can be found here: [igibson/examples/demo/physics_engine_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/physics_engine_example.py).
```python
import pybullet as p
diff --git a/docs/quickstart.md b/docs/quickstart.md
index c4f937ecd..e72c30fbe 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -4,12 +4,9 @@
Assume you finished installation and assets downloading. Let's get our hands dirty and see iGibson in action.
```bash
-python -m igibson.examples.environments.env_nonint_example
+python -m igibson.examples.demo.env_example
```
-
-If the execution fails with segfault 11, you may need to reduce texture scaling in the config file (igibson/configs/turtlebot_static_nav.yaml) to avoid out-of-memory error.
-
-You should see something like this. If you are on Mac OS X, you will only see the two small windows.
+You should see something like this:
![quickstart.png](images/quickstart.png)
The main window shows PyBullet visualization. The robot (TurtleBot) is moving around with random actions in a realistic house (called "Rs", the one you just downloaded!).
diff --git a/docs/renderer.md b/docs/renderer.md
index ec6ce23e7..73b975d24 100644
--- a/docs/renderer.md
+++ b/docs/renderer.md
@@ -8,53 +8,38 @@ We developed our own MeshRenderer that supports customizable camera configuratio
#### Simple Example
-In this example, we render an iGibson scene with a few lines of code. The code can be found in [igibson/examples/renderer/mesh_renderer_simple_example.py ](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/renderer/mesh_renderer_simple_example.py).
+In this example, we render an iGibson scene with a few lines of code. The code can be found in [igibson/examples/demo/mesh_renderer_simple_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/mesh_renderer_simple_example.py).
```
-import logging
-import os
-import sys
-
import cv2
+import sys
+import os
import numpy as np
-
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
from igibson.utils.assets_utils import get_scene_path
def main():
- """
- Minimal example of use of the renderer. Loads Rs (non interactive), renders one set of images (RGB, normals,
- 3D points (as depth)), shows them.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
-
- # If a model is given, we load it, otherwise we load Rs mesh (non interactive)
if len(sys.argv) > 1:
model_path = sys.argv[1]
else:
- model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj")
+ model_path = os.path.join(get_scene_path('Rs'), 'mesh_z_up.obj')
renderer = MeshRenderer(width=512, height=512)
renderer.load_object(model_path)
- renderer.add_instance_group([0])
+ renderer.add_instance(0)
camera_pose = np.array([0, 0, 1.2])
view_direction = np.array([1, 0, 0])
renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
renderer.set_fov(90)
- frames = renderer.render(modes=("rgb", "normal", "3d"))
-
- # Render 3d points as depth map
- depth = np.linalg.norm(frames[2][:, :, :3], axis=2)
- depth /= depth.max()
- frames[2][:, :, :3] = depth[..., None]
-
+ frames = renderer.render(
+ modes=('rgb', 'normal', '3d'))
frames = cv2.cvtColor(np.concatenate(frames, axis=1), cv2.COLOR_RGB2BGR)
- cv2.imshow("image", frames)
+ cv2.imshow('image', frames)
cv2.waitKey(0)
-if __name__ == "__main__":
+if __name__ == '__main__':
main()
```
@@ -66,9 +51,9 @@ For `Rs` scene, the rendering results will look like this:
In this example, we show an interactive demo of MeshRenderer.
```bash
-python -m igibson.examples.renderer.mesh_renderer_example
+python -m igibson.examples.demo.mesh_renderer_example
```
-You may translate the camera by pressing "WASD" on your keyboard and rotate the camera by dragging your mouse. Press `Q` to exit the rendering loop. The code can be found in [igibson/examples/renderer/mesh_renderer_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/renderer/mesh_renderer_example.py).
+You may translate the camera by pressing "WASD" on your keyboard and rotate the camera by dragging your mouse. Press `Q` to exit the rendering loop. The code can be found in [igibson/examples/demo/mesh_renderer_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/mesh_renderer_example.py).
#### PBR (Physics-Based Rendering) Example
@@ -77,7 +62,7 @@ You can test the physically based renderer with the PBR demo. You can render any
obj files in the folder.
```bash
-python -m igibson.examples.renderer.mesh_renderer_example_pbr /objects/sink/sink_1/shape/visual
+python -m igibson.examples.demo.mesh_renderer_example_pbr /objects/sink/sink_1/shape/visual
```
![pbr_renderer.png](images/pbr_render.png)
@@ -86,54 +71,12 @@ You will get a nice rendering of the sink, and should see the metal parts have s
#### Velodyne VLP-16 Example
-In this example, we show a demo of 16-beam Velodyne VLP-16 LiDAR placed on top of a virtual Turtlebot. The code can be found in [igibson/examples/observations/generate_lidar_velodyne.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/observations/generate_lidar_velodyne.py).
+In this example, we show a demo of 16-beam Velodyne VLP-16 LiDAR placed on top of a virtual Turtlebot. The code can be found in [igibson/examples/demo/lidar_velodyne_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/lidar_velodyne_example.py).
The Velodyne VLP-16 LiDAR visualization will look like this:
![lidar_velodyne.png](images/lidar_velodyne.png)
#### Render to PyTorch Tensors
-In this example, we show that MeshRenderer can directly render into a PyTorch tensor to maximize efficiency. PyTorch installation is required (otherwise, iGibson does not depend on PyTorch). The code can be found in [igibson/examples/renderer/mesh_renderer_gpu_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/renderer/mesh_renderer_gpu_example.py).
-
-
-#### About the 3D Image
-
-The mode '3d' provides a 4-channeled image where the first three channels correspond to the x, y, and z coordinates of the pixels in the image. Because our code internally uses OpenGL for rendering, the coordinates are defined in the common convention of this framework: for a given image, the x axis points from left to right, the y axis points from bottom to top, and the z axis points in the opposite direction of the viewing direction of the camera. The camera is located at the location of the link frame "eyes" of the robot, but the orientation is different and defined in the following way: the x axis points along the viewing direction of the camera, the y axis points from right to left and the z axis points from bottom to top. The following code can be helpful to transform points between reference frames:
-
-```
-# Pose of the camera of the simulated robot in world frame
-eye_pos, eye_orn = self.robot.links["eyes"].get_position_orientation()
-camera_in_wf = quat2rotmat(xyzw2wxyz(eye_orn))
-camera_in_wf[:3,3] = eye_pos
-
-# Transforming coordinates of points from opengl frame to camera frame
-camera_in_openglf = quat2rotmat(euler2quat(np.pi / 2.0, 0, -np.pi / 2.0))
-
-# Pose of the simulated robot in world frame
-robot_pos, robot_orn = self.robot.get_position_orientation()
-robot_in_wf = quat2rotmat(xyzw2wxyz(robot_orn))
-robot_in_wf[:3, 3] = robot_pos
-
-# Pose of the camera in robot frame
-cam_in_robot_frame = np.dot(np.linalg.inv(robot_in_wf), camera_in_wf)
-
-u = 0
-v = 0
-[td_image] = self.env.simulator.renderer.render(modes=('3d'))
-point_in_openglf = td_image[u, v]
-point_in_cf = np.dot(camera_in_openglf, point_in_openglf)
-point_in_rf = np.dot(cam_in_robot_frame, point_in_cf)
-point_in_wf = np.dot(robot_in_wf, point_in_rf)
-```
-
-#### About the Semantic Segmentation Image
-
-The modes 'seg' and 'ins_seg' provide a 4-channeled image where the first channel corresponds to the semantic segmentation and the instance segmentation, respectively. The values are normalized between 0 and 1, with normalizing constants of `MAX_CLASS_COUNT = 512` and `MAX_INSTANCE_COUNT = 1024` (defined in `utils/constants.py`). The following code is helpful to unnormalize the segmentation image:
-
-```
-[seg, ins_seg] = self.env.simulator.renderer.render(modes=('seg', 'ins_seg'))
-seg = (seg[:, :, 0:1] * MAX_CLASS_COUNT).astype(np.int32)
-ins_seg = (ins_seg[:, :, 0:1] * MAX_INSTANCE_COUNT).astype(np.int32)
-```
+In this example, we show that MeshRenderer can directly render into a PyTorch tensor to maximize efficiency. PyTorch installation is required (otherwise, iGibson does not depend on PyTorch). The code can be found in [igibson/examples/demo/mesh_renderer_gpu_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/mesh_renderer_gpu_example.py).
-This transformation is directly performed if the segmentation is accessed through a `VisionSensor` (e.g., as part of the iGibsonEnv) using the method `get_seg` and `get_ins_seg`.
diff --git a/docs/robots.md b/docs/robots.md
index 7dff1bc1c..2359ab89d 100644
--- a/docs/robots.md
+++ b/docs/robots.md
@@ -18,11 +18,11 @@ We provide a wide variety of **Robots** that can be imported into the **Simulato
Typically, these robot classes take in the URDF file or MuJoCo XML file of a robot (in `igibson.assets_path`) and provide a `load` function that can be invoked externally (usually by `import_robot` of `Simulator`). The `load` function imports the robot into PyBullet.
-All robot classes inherit `LocomotionRobot`. Some useful functions are worth pointing out (a toy usage sketch is given after this list):
+All robot classes inherit `LocomotorRobot`. Some useful functions are worth pointing out (a toy usage sketch is given after this list):
- `{get/set}_{position/orientation/rpy/linear_velocity/angular_velocity}`: get and set the physical states of the robot base
- `apply_robot_action`: set motor control for each of the controllable joints. It currently supports four modes of control: joint torque, velocity, position, and differential drive for two-wheeled robots
- `calc_state`: compute robot states that might be useful for external applications
-- `reset`: reset the robot joint states to their default value, particularly useful for mobile manipulators. For instance, `Fetch.reset()` will reset the robot to be something like this:
+- `robot_specific_reset`: reset the robot joint states to their default value, particularly useful for mobile manipulators. For instance, `Fetch.robot_specific_reset()` will reset the robot to be something like this:
![fetch.png](images/fetch.png)
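As a quick, hypothetical illustration of the helpers listed above (mirroring the older API shown in the example at the end of this page; exact signatures vary between iGibson versions):

```python
import os

import numpy as np
import pybullet as p

import igibson
from igibson.robots.fetch_robot import Fetch
from igibson.utils.utils import parse_config

p.connect(p.DIRECT)  # headless physics server

config = parse_config(os.path.join(igibson.example_config_path, "fetch_reaching.yaml"))
robot = Fetch(config)
robot.load()

robot.set_position([0, 0, 0])                          # place the robot base
robot.robot_specific_reset()                           # reset joints to their default values
state = robot.calc_state()                             # state vector for external applications
robot.apply_robot_action(np.zeros(robot.action_dim))   # zero normalized joint velocities

p.disconnect()
```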
@@ -33,8 +33,8 @@ Here are some details about how we perform motor control for robots:
- `robot_action` will be applied by `apply_robot_action`, which internally executes the following:
```python
def apply_robot_action(action):
- for n, j in enumerate(self.joints.values()):
- j.set_vel(self.velocity_coef * j.max_velocity * float(np.clip(action[n], -1, +1)))
+ for n, j in enumerate(self.ordered_joints):
+ j.set_motor_velocity(self.velocity_coef * j.max_velocity * float(np.clip(action[n], -1, +1)))
```
Note that `robot_action` is a normalized joint velocity, i.e. `robot_action[n] == 1.0` means executing the maximum joint velocity for the nth joint. The limits of joint position, velocity and torque are extracted from the URDF file of the robot.
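As a small worked example of this normalization (the limits below are made up, not taken from any actual robot URDF):

```python
import numpy as np

max_velocity = np.array([1.0, 2.0])    # per-joint velocity limits from the URDF (made-up values)
velocity_coef = 1.0
robot_action = np.array([0.5, -1.3])   # normalized action; values outside [-1, 1] are clipped

commanded = velocity_coef * max_velocity * np.clip(robot_action, -1, +1)
print(commanded)  # [ 0.5 -2. ]
```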
@@ -56,72 +56,74 @@ The reference frame of each body part is shown below.
### Examples
-In this example, we import four different robots into PyBullet. We keep them still for around 10 seconds and then move them with small random actions for another 10 seconds. The code can be found here: [igibson/examples/robots/robot_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/robots/robot_example.py).
+In this example, we import four different robots into PyBullet. We keep them still for around 10 seconds and then move them with small random actions for another 10 seconds. The code can be found here: [igibson/examples/demo/robot_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/robot_example.py).
```python
-import logging
+from igibson.robots.locobot_robot import Locobot
+from igibson.robots.turtlebot_robot import Turtlebot
+from igibson.robots.jr2_kinova_robot import JR2_Kinova
+from igibson.robots.fetch_robot import Fetch
+from igibson.utils.utils import parse_config
import os
-
+import time
import numpy as np
-
+import pybullet as p
+import pybullet_data
import igibson
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.robots import REGISTERED_ROBOTS
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.simulator import Simulator
-from igibson.utils.utils import parse_config
-
def main():
- """
- Robot demo
- Loads all robots in an empty scene, generate random actions
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- # Create empty scene
- settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- s.import_scene(scene)
-
- # Create one instance of each robot aligned along the y axis
- position = [0, 0, 0]
- robots = {}
- for robot_config_file in os.listdir(os.path.join(igibson.example_config_path, "robots")):
- config = parse_config(os.path.join(igibson.example_config_path, "robots", robot_config_file))
- robot_config = config["robot"]
- robot_name = robot_config.pop("name")
- robot = REGISTERED_ROBOTS[robot_name](**robot_config)
- s.import_robot(robot)
+ p.connect(p.GUI)
+ p.setGravity(0, 0, -9.8)
+ p.setTimeStep(1./240.)
+
+ floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
+ p.loadMJCF(floor)
+
+ robots = []
+ config = parse_config(os.path.join(igibson.example_config_path, 'fetch_reaching.yaml'))
+ fetch = Fetch(config)
+ robots.append(fetch)
+
+ config = parse_config(os.path.join(igibson.example_config_path,'jr_reaching.yaml'))
+ jr = JR2_Kinova(config)
+ robots.append(jr)
+
+ config = parse_config(os.path.join(igibson.example_config_path, 'locobot_point_nav.yaml'))
+ locobot = Locobot(config)
+ robots.append(locobot)
+
+ config = parse_config(os.path.join(igibson.example_config_path, 'turtlebot_point_nav.yaml'))
+ turtlebot = Turtlebot(config)
+ robots.append(turtlebot)
+
+ positions = [
+ [0, 0, 0],
+ [1, 0, 0],
+ [0, 1, 0],
+ [1, 1, 0]
+ ]
+
+ for robot, position in zip(robots, positions):
+ robot.load()
robot.set_position(position)
- robot.reset()
+ robot.robot_specific_reset()
robot.keep_still()
- robots[robot_name] = (robot, position[1])
- logging.info("Loaded " + robot_name)
- logging.info("Moving " + robot_name)
- # Set viewer in front
- s.viewer.initial_pos = [1.6, 0, 1.3]
- s.viewer.initial_view_direction = [-0.7, 0, -0.7]
- s.viewer.reset_viewer()
-
- for _ in range(100): # keep still for 10 seconds
- s.step()
-
- for _ in range(30):
+
+ for _ in range(2400): # keep still for 10 seconds
+ p.stepSimulation()
+ time.sleep(1./240.)
+
+ for _ in range(2400): # move with small random actions for 10 seconds
+ for robot, position in zip(robots, positions):
action = np.random.uniform(-1, 1, robot.action_dim)
robot.apply_action(action)
- for _ in range(10):
- s.step()
-
- robot.keep_still()
- s.reload()
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- s.import_scene(scene)
+ p.stepSimulation()
+ time.sleep(1./240.0)
- s.disconnect()
+ p.disconnect()
-if __name__ == "__main__":
+if __name__ == '__main__':
main()
```
The four robots will have a fun cocktail party like this:
diff --git a/docs/sampling.md b/docs/sampling.md
deleted file mode 100644
index 7a4496317..000000000
--- a/docs/sampling.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Sampling Scene Instances
-
-The [logic states](extended_states.md) implemented in iGibson since the version 2.0 provide a mechanism that facilitates the generation of simulated scenes to study and develop robotic solutions.
-Users of iGibson can specify the desired configuration of the environment in the logic language [BDDL](https://github.com/StanfordVL/bddl#readme), part of the [BEHAVIOR benchmark](behavior.stanford.edu).
-This language has similarities to the Planning Domain Definition Language (PDDL), allowing researchers to define scenes in a logic-semantic manner (e.g., objects on top of, next to, or inside of others) instead of the time-consuming and tedious work of manually specifying their positions.
-Given a scene definition in BDDL, iGibson provides the functionalities to sample compliant instances of the logic description to be used in simulation.
-The image below shows an example of different instances sampled from the same logic description (three books on a table).
-
-![sampling2.gif](images/sampling2.gif)
-
-The first step to generate a new activity in iGibson is to create its BDDL description.
-Please, follow the instructions [here](https://behavior.stanford.edu/activity-annotation) to create your own BDDL description using our online web interface, or modify some of the existing descriptions included as part of BEHAVIOR (see [here](https://github.com/StanfordVL/bddl/tree/master/bddl/activity_definitions)).
-
-The next step is to download and install [BDDL](https://github.com/StanfordVL/bddl). Place your own BDDL description under `bddl/activity_definitions//problem0.bddl`.
-
-Then you can select a *vanilla scene* to instantiate the BDDL description on.
-We provide 15 scenes as part of the iGibson dataset, furnished and with clutter objects to be specialized for multiple activities. See the available scenes in `ig_dataset/scenes` or [here](http://svl.stanford.edu/igibson/docs/dataset.html).
-
-With the BDDL description and a list of scene names, iGibson can generate valid scene instances of the description with the following script:
-```
-python -m igibson.utils.data_utils.sampling_task.sampling_saver --task --task_id 0 --scenes ...
-```
-
-The script will sample possible poses of object models of the indicated classes until all conditions are fulfilled.
-The result will be stored as `ig_dataset/scenes//urdf/_task__0_0.urdf`, a description of the scene with additional objects that fulfill the initial conditions in the BDDL description.
-The user should ensure that the definition is sampleable in the given scene. Otherwise, after a certain number of sampling attempts, the script will fail and return.
-
-We recommend using the BEHAVIOR Dataset of 3D objects to get access to hundreds of object models to create new activities.
-
diff --git a/docs/scenes.md b/docs/scenes.md
index 4c8121c97..e90bd4274 100644
--- a/docs/scenes.md
+++ b/docs/scenes.md
@@ -6,7 +6,7 @@ We provide four types of scenes.
- `StaticIndoorScene`: it loads static 3D scenes from `igibson.g_dataset_path`.
- `InteractiveIndoorScene`: it loads fully interactive 3D scenes from `igibson.ig_dataset_path`.
-Typically, they take in the `scene_id` of a scene and provide a `load` function that can be invoked externally (usually by `import_scene` of the `Simulator`).
+Typically, they take in the `scene_id` of a scene and provide a `load` function that can be invoked externally (usually by `import_scene` and `import_ig_scene` of the `Simulator`).
To be more specific, the `load` function of `StaticIndoorScene`
- stores the floor information (we have many multistory houses in our dataset)
@@ -35,37 +35,30 @@ Instruction can be found here: [External Scenes](https://github.com/StanfordVL/i
#### Stadium Scenes
-In this example, we import a simple stadium scene that is good for debugging. The code can be found here: [igibson/examples/scenes/stadium_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/stadium_example.py).
+In this example, we import a simple stadium scene that is good for debugging. The code can be found here: [igibson/examples/demo/scene_stadium_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_stadium_example.py).
```python
-import logging
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.profiler import Profiler
from igibson.scenes.stadium_scene import StadiumScene
-from igibson.simulator import Simulator
-
+import pybullet as p
+import numpy as np
+import time
def main():
- """
- Loads the Stadium scene
- This scene is default in pybullet but is not really useful in iGibson
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
-
- settings = MeshRendererSettings(enable_shadow=False, msaa=False)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
+ p.connect(p.GUI)
+ p.setGravity(0,0,-9.8)
+ p.setTimeStep(1./240.)
scene = StadiumScene()
- s.import_scene(scene)
+ scene.load()
- while True:
- with Profiler("Simulator step"):
- s.step()
- s.disconnect()
+ for _ in range(24000): # at least 100 seconds
+ p.stepSimulation()
+ time.sleep(1./240.)
+
+ p.disconnect()
-if __name__ == "__main__":
+if __name__ == '__main__':
main()
```
@@ -74,156 +67,98 @@ The stadium scene looks like this:
#### Static Building Scenes
-In this example, we import a static scene, and then randomly sample a pair of locations in the scene and compute the shortest path between them. The code can be found here: [igibson/examples/scenes/g_scene_selector.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/g_scene_selector.py).
+In this example, we import a static scene, and then randomly sample a pair of locations in the scene and compute the shortest path between them. The code can be found here: [igibson/examples/demo/scene_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_example.py).
```python
-import logging
-from sys import platform
-
+from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
+import pybullet as p
import numpy as np
+import time
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.profiler import Profiler
-from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_available_g_scenes
-from igibson.utils.utils import let_user_pick
+def main():
+ p.connect(p.GUI)
+ p.setGravity(0,0,-9.8)
+ p.setTimeStep(1./240.)
+ scene = StaticIndoorScene('Rs',
+ build_graph=True,
+ pybullet_load_texture=True)
+ scene.load()
-def main():
- """
- Prompts the user to select any available non-interactive scene and loads it.
- Shows how to load directly scenes without the Environment interface
- Shows how to sample points in the scene and how to compute geodesic distance and the shortest path
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- available_g_scenes = get_available_g_scenes()
- scene_id = available_g_scenes[let_user_pick(available_g_scenes) - 1]
- settings = MeshRendererSettings(enable_shadow=True, msaa=False)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- settings.texture_scale = 0.5
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
-
- scene = StaticIndoorScene(
- scene_id,
- build_graph=True,
- )
- s.import_scene(scene)
-
- # Shows how to sample points in the scene
np.random.seed(0)
for _ in range(10):
random_floor = scene.get_random_floor()
- p1 = scene.get_random_point(random_floor)[1]
- p2 = scene.get_random_point(random_floor)[1]
+ p1 = scene.get_random_point(floor=random_floor)[1]
+ p2 = scene.get_random_point(floor=random_floor)[1]
shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True)
- logging.info("Random point 1: {}".format(p1))
- logging.info("Random point 2: {}".format(p2))
- logging.info("Geodesic distance between p1 and p2: {}".format(geodesic_distance))
- logging.info("Shortest path from p1 to p2: {}".format(shortest_path))
+ print('random point 1:', p1)
+ print('random point 2:', p2)
+ print('geodesic distance between p1 and p2', geodesic_distance)
+ print('shortest path from p1 to p2:', shortest_path)
- input("Press enter")
+ for _ in range(24000): # at least 100 seconds
+ p.stepSimulation()
+ time.sleep(1./240.)
- while True:
- with Profiler("Simulator step"):
- s.step()
- s.disconnect()
+ p.disconnect()
-if __name__ == "__main__":
+if __name__ == '__main__':
main()
```
-
#### Interactive Building Scenes
-In this example, we import a fully interactive scene, and randomly sample points given a room type such as "living_room". This can be useful for tasks that require the robot to always be spawned in certain room types. We support fifteen such scenes right now as part of the new iGibson Dataset. The code can be found here: [igibson/examples/scenes/ig_scene_selector.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/ig_scene_selector.py).
+In this example, we import a fully interactive scene, and randomly sample points given a room type such as "living_room". This can be useful for tasks that require the robot to always be spawned in certain room types. We support fifteen such scenes right now as part of the new iGibson Dataset. The code can be found here: [igibson/examples/demo/scene_interactive_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_interactive_example.py).
Note that all objects in these scenes can be interacted with realistically.
![scene_interactive.png](images/scene_interactive.png)
```python
-import logging
-from sys import platform
-
-import numpy as np
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.profiler import Profiler
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_available_ig_scenes
-from igibson.utils.utils import let_user_pick
+import numpy as np
def main():
- """
- Prompts the user to select any available interactive scene and loads it.
- Shows how to load directly scenes without the Environment interface
- Shows how to sample points in the scene by room type and how to compute geodesic distance and the shortest path
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- available_ig_scenes = get_available_ig_scenes()
- scene_id = available_ig_scenes[let_user_pick(available_ig_scenes) - 1]
- settings = MeshRendererSettings(enable_shadow=True, msaa=False)
- if platform == "darwin":
- settings.texture_scale = 0.5
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
-
+ s = Simulator(mode='gui', image_width=512,
+ image_height=512, device_idx=0)
scene = InteractiveIndoorScene(
- scene_id,
- load_object_categories=[], # To load only the building. Fast
- build_graph=True,
- )
- s.import_scene(scene)
+ 'Rs_int', texture_randomization=False, object_randomization=False)
+ s.import_ig_scene(scene)
- # Shows how to sample points in the scene
np.random.seed(0)
for _ in range(10):
- pt = scene.get_random_point_by_room_type("living_room")[1]
- logging.info("Random point in living_room: {}".format(pt))
+ pt = scene.get_random_point_by_room_type('living_room')[1]
+ print('random point in living_room', pt)
- for _ in range(10):
- random_floor = scene.get_random_floor()
- p1 = scene.get_random_point(random_floor)[1]
- p2 = scene.get_random_point(random_floor)[1]
- shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True)
- logging.info("Random point 1: {}".format(p1))
- logging.info("Random point 2: {}".format(p2))
- logging.info("Geodesic distance between p1 and p2: {}".format(geodesic_distance))
- logging.info("Shortest path from p1 to p2: {}".format(shortest_path))
-
- input("Press enter")
-
- while True:
- with Profiler("Simulator step"):
- s.step()
+ for _ in range(1000):
+ s.step()
s.disconnect()
-if __name__ == "__main__":
+if __name__ == '__main__':
main()
+
```
##### Texture Randomization
-In this example, we demonstrate material/texture randomization functionality of `InteractiveIndoorScene`. The goal is to randomize the material, texture and dynamic properties of all scene objects by calling `scene.randomize_texture` on-demand. The code can be found here: [igibson/examples/scenes/scene_texture_rand_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/scene_texture_rand_example.py ).
+In this example, we demonstrate material/texture randomization functionality of `InteractiveIndoorScene`. The goal is to randomize the material, texture and dynamic properties of all scene objects by calling `scene.randomize_texture` on-demand. The code can be found here: [igibson/examples/demo/scene_interactive_texture_rand_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_interactive_texture_rand_example.py).
The randomized materials in the `ExternalView` window should look like this.
![scene_interactive_texture_rand](images/scene_interactive_texture_rand.png)
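A minimal sketch of this on-demand pattern (assuming the `Rs_int` scene is downloaded; the 100-step randomization interval is an arbitrary choice):
```python
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator


def main():
    s = Simulator(mode='gui', image_width=512, image_height=512)
    scene = InteractiveIndoorScene(
        'Rs_int', texture_randomization=True, object_randomization=False)
    s.import_ig_scene(scene)

    for i in range(1000):
        # Re-sample materials/textures for all scene objects on demand
        if i % 100 == 0:
            scene.randomize_texture()
        s.step()
    s.disconnect()


if __name__ == '__main__':
    main()
```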
##### Object Randomization
-In this example, we demonstrate the object randomization functionality of `InteractiveIndoorScene`. The goal is to randomize the object models while maintaining their poses and categories. Note that when object models are randomized, there is no guarantee that they will be collision-free or that fixed, articulated objects will be able to extend their joints without collision. We provide the `scene.check_scene_quality` function to check scene quality, and you should re-sample object models if it returns `False`. An alternative (recommended) way is to use the random object model configurations that we provide (10 for each scene), which guarantee scene quality, by passing in `object_randomization_idx=[0-9]`. Finally, object randomization can be expensive because the new object models need to be loaded into the simulator each time, so we recommend only using it occasionally (e.g. every 1000 training episodes; see the sketch below). The code can be found here: [igibson/examples/scenes/scene_texture_rand_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/scene_texture_rand_example.py).
+In this example, we demonstrate the object randomization functionality of `InteractiveIndoorScene`. The goal is to randomize the object models while maintaining their poses and categories. Note that when object models are randomized, there is no guarantee that they will be collision-free or that fixed, articulated objects will be able to extend their joints without collision. We provide the `scene.check_scene_quality` function to check scene quality, and you should re-sample object models if it returns `False`. An alternative (recommended) way is to use the random object model configurations that we provide (10 for each scene), which guarantee scene quality, by passing in `object_randomization_idx=[0-9]`. Finally, object randomization can be expensive because the new object models need to be loaded into the simulator each time, so we recommend only using it occasionally (e.g. every 1000 training episodes; see the sketch below). The code can be found here: [igibson/examples/demo/scene_interactive_object_rand_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_interactive_object_rand_example.py).
The randomized object models in the `ExternalView` window should look like this.
![scene_interactive_object_rand](images/scene_interactive_object_rand.png)
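A minimal sketch of the recommended workflow described above, reloading the scene with one of the ten pre-verified object randomization configurations every 1000 episodes; the inner loop is only a placeholder for a training episode:
```python
import numpy as np

from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator


def main():
    s = Simulator(mode='headless', image_width=512, image_height=512)
    for episode in range(5000):
        # Object randomization is expensive, so only reload the scene occasionally,
        # picking one of the pre-verified object model configurations (0-9)
        if episode % 1000 == 0:
            s.reload()
            scene = InteractiveIndoorScene(
                'Rs_int',
                object_randomization=True,
                object_randomization_idx=np.random.randint(0, 10))
            s.import_ig_scene(scene)
        # Placeholder for one training episode
        for _ in range(10):
            s.step()
    s.disconnect()


if __name__ == '__main__':
    main()
```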
##### Partial Scene Loading
-In this example, we demonstrate partial scene loading functionality of `InteractiveIndoorScene`. Specifically in this example we only load "chairs" in "living rooms". This can be useful for tasks that only require certain object categories or rooms. The code can be found here: [igibson/examples/scenes/scene_partial_loading_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/scene_partial_loading_example.py).
+In this example, we demonstrate partial scene loading functionality of `InteractiveIndoorScene`. Specifically in this example we only load "chairs" in "living rooms". This can be useful for tasks that only require certain object categories or rooms. The code can be found here: [igibson/examples/demo/scene_interactive_partial_loading_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_interactive_partial_loading_example.py).
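A minimal sketch along these lines; `load_object_categories` appears in the scene examples above, while `load_room_types` is assumed to be the matching keyword for restricting rooms:
```python
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator


def main():
    s = Simulator(mode='gui', image_width=512, image_height=512)
    # Only load chairs, and only the living rooms of Rs_int
    scene = InteractiveIndoorScene(
        'Rs_int',
        load_object_categories=['chair'],
        load_room_types=['living_room'])
    s.import_ig_scene(scene)

    for _ in range(1000):
        s.step()
    s.disconnect()


if __name__ == '__main__':
    main()
```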
#### Visualize Traversability Map
-In this example, we visualize the traversability map of a scene. We use this map to build an internal traversability graph for each floor so that we can compute the shortest path between two locations, and place robots and objects at valid locations inside the scene. The code can be found here: [igibson/examples/robots/trav_map_vis_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/robots/trav_map_vis_example.py).
+In this example, we visualize the traversability map of a scene. We use this map to build an internal traversability graph for each floor so that we can compute the shortest path between two locations, and place robots and objects at valid locations inside the scene. The code can be found here: [igibson/examples/demo/trav_map_vis_example.py](https://github.com/StanfordVL/iGibson/blob/master/examples/trav_map_vis_example.py).
The traversability map of the scene `Rs` looks like this:
![trav_map_vis](images/trav_map_vis.png)
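To inspect the raw map directly, the per-floor traversability image shipped with each scene can simply be loaded and displayed; a sketch, assuming the usual `floor_trav_0.png` file name inside the scene folder of the Gibson dataset:
```python
import os

import matplotlib.pyplot as plt
from PIL import Image

import igibson


def main():
    # Traversability map of floor 0 of the static scene 'Rs'
    # (the floor_trav_0.png file name is assumed from the dataset layout)
    trav_map_path = os.path.join(igibson.g_dataset_path, 'Rs', 'floor_trav_0.png')
    trav_map = Image.open(trav_map_path)
    plt.imshow(trav_map, cmap='gray')
    plt.title('Traversability map of Rs (floor 0)')
    plt.show()


if __name__ == '__main__':
    main()
```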
diff --git a/docs/simulators.md b/docs/simulators.md
index c94ae25de..e460e3e58 100644
--- a/docs/simulators.md
+++ b/docs/simulators.md
@@ -1,4 +1,4 @@
-# Simulator
+# Simulators
### Overview
@@ -6,7 +6,7 @@
Some key functions are the following:
- `load`: initialize PyBullet physics engine and MeshRenderer
-- `import_{scene, ig_scene}`: import the scene into PyBullet by calling `scene.load`, and then import it into MeshRenderer by calling `self.renderer.add_instance_group`. If `InteractiveIndoorScene` is imported using `import_scene`, all objects in the scene are also imported.
+- `import_{scene, ig_scene}`: import the scene into PyBullet by calling `scene.load`, and then import it into MeshRenderer by calling `self.renderer.add_instance`. If `InteractiveIndoorScene` is imported using `import_ig_scene`, all objects in the scene are also imported.
- `import_{object, articulated_object, robot}`: import the object, articulated object and robot into the simulator in a similar manner
- `sync`: synchronize the poses of the dynamic objects (including the robots) between PyBullet and MeshRenderer. Specifically, it calls `update_position` for each object, which retrieves the object's pose in PyBullet and then updates its pose accordingly in MeshRenderer (see the sketch below).
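For instance, a minimal usage sketch that exercises `import_scene`, `import_object` and the sync behavior (assuming `s.step()` steps the physics and then calls `sync` internally, and that the `003_cracker_box` YCB model is available):
```python
from igibson.objects.ycb_object import YCBObject
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.simulator import Simulator


def main():
    s = Simulator(mode='gui', image_width=256, image_height=256)
    scene = StaticIndoorScene('Rs', build_graph=True, pybullet_load_texture=True)
    s.import_scene(scene)

    # Import a single dynamic object; its pose is mirrored to MeshRenderer via sync
    obj = YCBObject('003_cracker_box')
    s.import_object(obj)
    obj.set_position_orientation([0, 0, 1.0], [0, 0, 0, 1])

    for _ in range(1000):
        s.step()
    s.disconnect()


if __name__ == '__main__':
    main()
```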
@@ -15,10 +15,10 @@ If `Simulator` uses `gui` mode, by default it will also maintain a `Viewer`, whi
Most of the code can be found here: [igibson/simulator.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/simulator.py).
### Examples
-In this example, we import a `StaticIndoorScene`, a `Turtlebot`, and ten `YCBObject` into the simulator.
+In this example, we import a `StaticIndoorScene`, a `Turtlebot`, and ten `YCBObject` into the simulator. The code can be found here: [igibson/examples/demo/simulator_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/simulator_example.py)
```python
-from igibson.robots.turtlebot import Turtlebot
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.simulator import Simulator
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.objects.ycb_object import YCBObject
@@ -30,9 +30,9 @@ from IPython import embed
def main():
- config = parse_config('../configs/turtlebot_static_nav.yaml')
+ config = parse_config('../configs/turtlebot_demo.yaml')
settings = MeshRendererSettings(enable_shadow=False, msaa=False)
- s = Simulator(mode='gui_interactive', image_width=256,
+ s = Simulator(mode='gui', image_width=256,
image_height=256, rendering_settings=settings)
scene = StaticIndoorScene('Rs',
diff --git a/docs/tests.md b/docs/tests.md
index be3f66706..bc43b6913 100644
--- a/docs/tests.md
+++ b/docs/tests.md
@@ -1,8 +1,8 @@
-# Tests and Examples
-### Tests
-We provide tests in [this directory](https://github.com/StanfordVL/iGibson/tree/master/tests) in our repository.
-You can run them executing:
+# Tests
+### Overview
+We provide tests in [test](https://github.com/StanfordVL/iGibson/tree/master/igibson/test). You can run them with:
```bash
+cd igibson/test
pytest --ignore disabled --ignore benchmark
```
It will take a few minutes. If all tests pass, you will see something like this
@@ -21,3 +21,4 @@ test_scene_importing.py .... [ 9
test_simulator.py . [ 96% ]
test_viewer.py
```
+
diff --git a/docs/viewer.md b/docs/viewer.md
index 0335485ac..a312211c4 100644
--- a/docs/viewer.md
+++ b/docs/viewer.md
@@ -2,24 +2,19 @@
### Overview
-We developed an easy-to-use iGibson-human interface called **Viewer** for users to inspect and interact with our scenes and objects. The Viewer will automatically pop up if you use `gui_non_interactive` or `gui_interactive` mode in `Simulator`.
+We developed an easy-to-use iGibson-human interface called **Viewer** for users to inspect and interact with our scenes and objects. The Viewer will automatically pop up if you use `gui` or `iggui` mode in `Simulator`.
To play with an example of viewer, you can run the following command:
```bash
-python -m igibson.examples.environments.env_int_example
+python -m igibson.examples.demo.mouse_interaction
```
-or
-```bash
-python -m igibson.examples.environments.env_nonint_example
-```
-after downloading the Rs_int or Rs scenes (interactive or non-interactive, see [the installation instructions](installation.md))
![viewer.png](images/viewer.png)
-In the example image above, you can see in the top left corner `px 0.4 py -0.9 pz 1.2`, which indicates the camera position, `[1.0, 0.1, -0.1]`, which indicates the camera viewing direction, and `manip mode`, which indicates the current control mode you are in. There are three modes that we explain below.
+In the top left corner, you can see `px 0.4 py -0.9 pz 1.2`, which indicates the camera position, `[1.0, 0.1, -0.1]`, which indicates the camera orientation, and `manip mode`, which indicates the current control mode you are in (explained below).
Keyboard control includes the following
-- `W`, `A`, `S`, `D`, `T`, `G`: translate forward/left/backward/right/up/down
+- `W`, `A`, `S`, `D`: translate forward/left/backward/right
- `Q`, `E`: rotate left/right
- `M`: switch between the different control modes (navigation, manipulation and planning)
- `R`: start/stop recording
@@ -45,4 +40,4 @@ Mouse control in planning mode
In manipulation and planning modes, a visual indicator is shown in the `Viewer` to assist control (e.g. the blue sphere at the bottom of the image above).
-The code can be found in [igibson/render/viewer.py](https://github.com/StanfordVL/iGibson/tree/master/igibson/render/viewer.py).
+Most of the code can be found in [igibson/render/viewer.py](https://github.com/StanfordVL/iGibson/tree/master/igibson/render/viewer.py).
diff --git a/docs/virtual_reality.md b/docs/virtual_reality.md
index 29b6259c2..fb4e9a4a8 100644
--- a/docs/virtual_reality.md
+++ b/docs/virtual_reality.md
@@ -1,39 +1,38 @@
### Virtual Reality Overview
-Virtual reality is currently supported on Windows 10/11 on the HTC Vive and Oculus Rift/Quest (with link), and on Linux with the HTC Vive (there is no linux driver for Oculus).
+Virtual reality is currently only supported on Windows 10/11 on the HTC Vive and Oculus Rift/Quest (with link).
-The HTC Vive Pro Eye tracking driver is not available for Linux. You must have the latest Nvidia driver 470.XX and SteamVR 1.19.7 as asynchronous reprojection (a form of motion smoothing necessary for a usable VR experience) was only added in mid 2021.
+Linux support is planned, but the HTC Vive Pro Eye tracking driver is not available for Linux, and asynchronous reprojection/motion smoothing is currently not supported by the Nvidia proprietary drivers on Linux.
### Setup
1. Set up the HTC Vive VR hardware according to the [setup guide](https://support.steampowered.com/steamvr/HTC_Vive/)
2. (optional) if you plan to use eye tracking on Windows, create a [vive developer account](https://hub.vive.com/sso/login) then download and install the [SRAnipal runtime](https://developer.vive.com/resources/vive-sense/sdk/vive-eye-and-facial-tracking-sdk/). Note you should [calibrate](https://developer.vive.com/us/support/sdk/category_howto/how-to-calibrate-eye-tracking.html) the Vive eye tracker before each recording session.
-3. Ensure you have installed iGibson according to the installation [instructions](http://svl.stanford.edu/igibson/docs/installation.html#installation-method).
- * Note: On Windows, VR support is enabled by default. On Linux, you must install with an additional environment variable: `USE_VR=TRUE pip install -e .`. You must also have additional development headers installed: on Ubuntu `sudo apt install xorg-dev` and on Centos/Fedora: `sudo dnf install libXinerama-devel libXi-devel libXrandr-devel libXcursor-devel`.
+3. Ensure you have installed iGibson according to the installation [instructions](http://svl.stanford.edu/igibson/docs/installation.html#installation-method)
### VR examples
We have several examples showing how to use our VR interface:
-* vr demo files: `igibson/examples/vr`
+* vr demo files: `igibson/examples/demo/vr_demos`
-* multi-user VR (experimental): `igibson/examples/vr/muvr`
+* multi-user VR (experimental): `igibson/examples/demo/vr_demos/muvr`
-* benchmark: `igibson/examples/vr/in_development`
+* benchmark: `igibson/examples/demo/vr_demos/in_development`
- `vr_hand_dex_benchmark.py` -- Demonstrates various challenging manipulation tasks
- `vr_hand_speed_benchmark.py` -- Demonstrates various tasks to assess user speed at working in VR
-* data_save_replay: `igibson/examples/vr/data_save_replay`
- - This folder demonstrates how to save VR recordings and "replay" them to reproduce the recorded trajectory
+* data_save_replay: `igibson/examples/demo/vr_demos/data_save_replay`
+ - This folder demonstrates how to save VR recordings and "replay" them to reproduce the recorded trajectory
-* robot_embodiment: `igibson/examples/vr/robot_embodiment`
+* robot_embodiment: `igibson/examples/demo/vr_demos/robot_embodiment`
- This folder demonstrates the VR interface to the Fetch robot (WIP)
Benchmarks:
-We have two benchmarks - a hand and speed benchmark, both of which can be found in the top level of the vr examples folder. In these demos, you can time yourself performing various challenges,
+We have two benchmarks - a hand and speed benchmark, both of which can be found in the top level of the vr_demos folder. In these demos, you can time yourself performing various challenges,
such as putting objects away into specific containers/cabinets. Please see the comments in these demo files for more information.
### VR config and button mapping:
@@ -54,7 +53,7 @@ Some additional options you may be interested in changing:
We recommend looking at `igibson/render/mesh_renderer/mesh_renderer_vr.py` to see the VrSettings class which reads from `vr_config.yaml`. A single VrSettings object is created and passed in to the `Simulator` constructor.
-Note(optional): If you are using a device not already mapped, please run `igibson/examples/vr/in_development/vr_button_mapping.py` to figure out which physical controller buttons correspond to which indices in OpenVR.
+Note(optional): If you are using a device not already mapped, please run `igibson/examples/demo/vr_demos/in_development/vr_button_mapping.py` to figure out which physical controller buttons correspond to which indices in OpenVR.
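A sketch of that pattern (the `'vr'` mode string and the `vr_settings` keyword argument are assumptions about the `Simulator` constructor in this release; the default `VrSettings()` is assumed to read `vr_config.yaml`):
```python
from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator


def main():
    # A single VrSettings object, read from vr_config.yaml, is passed to the Simulator
    vr_settings = VrSettings()
    s = Simulator(mode='vr', vr_settings=vr_settings)
    scene = InteractiveIndoorScene('Rs_int')
    s.import_ig_scene(scene)

    while True:
        s.step()


if __name__ == '__main__':
    main()
```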
### Mirroring the VR view on the monitor
diff --git a/igibson/__init__.py b/igibson/__init__.py
index a3348ce75..bde107b0b 100644
--- a/igibson/__init__.py
+++ b/igibson/__init__.py
@@ -3,7 +3,7 @@
import yaml
-__version__ = "2.0.4"
+__version__ = "2.0.1"
logging.getLogger().setLevel(logging.INFO)
@@ -62,18 +62,19 @@
if not os.path.isabs(key_path):
key_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), key_path)
-logging.info("Importing iGibson (igibson module)")
-logging.info("Assets path: {}".format(assets_path))
-logging.info("Gibson Dataset path: {}".format(g_dataset_path))
-logging.info("iG Dataset path: {}".format(ig_dataset_path))
-logging.info("3D-FRONT Dataset path: {}".format(threedfront_dataset_path))
-logging.info("CubiCasa5K Dataset path: {}".format(cubicasa_dataset_path))
-logging.info("iGibson Key path: {}".format(key_path))
+# NOTE (njk): disabled the logging prints below to avoid cluttering the terminal when running predicators code.
+# logging.info("Importing iGibson (igibson module)")
+# logging.info("Assets path: {}".format(assets_path))
+# logging.info("Gibson Dataset path: {}".format(g_dataset_path))
+# logging.info("iG Dataset path: {}".format(ig_dataset_path))
+# logging.info("3D-FRONT Dataset path: {}".format(threedfront_dataset_path))
+# logging.info("CubiCasa5K Dataset path: {}".format(cubicasa_dataset_path))
+# logging.info("iGibson Key path: {}".format(key_path))
example_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples")
-example_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")
+example_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples", "configs")
-logging.info("Example path: {}".format(example_path))
-logging.info("Example config path: {}".format(example_config_path))
+# logging.info("Example path: {}".format(example_path))
+# logging.info("Example config path: {}".format(example_config_path))
debug_sampling = False
diff --git a/igibson/examples/environments/__init__.py b/igibson/activity/__init__.py
similarity index 100%
rename from igibson/examples/environments/__init__.py
rename to igibson/activity/__init__.py
diff --git a/igibson/activity/activity_base.py b/igibson/activity/activity_base.py
new file mode 100644
index 000000000..a39e7d008
--- /dev/null
+++ b/igibson/activity/activity_base.py
@@ -0,0 +1,974 @@
+import logging
+from collections import OrderedDict
+
+import networkx as nx
+import pybullet as p
+from bddl.activity_base import BEHAVIORActivityInstance
+from bddl.condition_evaluation import Negation
+from bddl.logic_base import AtomicFormula
+
+import igibson
+from igibson.external.pybullet_tools.utils import *
+from igibson.object_states.on_floor import RoomFloor
+from igibson.objects.articulated_object import URDFObject
+from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer
+from igibson.robots.behavior_robot import BehaviorRobot
+from igibson.robots.fetch_gripper_robot import FetchGripper
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path
+from igibson.utils.checkpoint_utils import load_internal_states, save_internal_states
+from igibson.utils.constants import (
+ AGENT_POSE_DIM,
+ FLOOR_SYNSET,
+ MAX_TASK_RELEVANT_OBJS,
+ NON_SAMPLEABLE_OBJECTS,
+ TASK_RELEVANT_OBJS_OBS_DIM,
+)
+
+KINEMATICS_STATES = frozenset({"inside", "ontop", "under", "onfloor"})
+
+
+class iGBEHAVIORActivityInstance(BEHAVIORActivityInstance):
+ def __init__(
+ self,
+ behavior_activity,
+ activity_definition=0,
+ predefined_problem=None,
+ robot_type=BehaviorRobot,
+ robot_config={},
+ ):
+ """
+ Initialize simulator with appropriate scene and sampled objects.
+ :param behavior_activity: string, official ATUS activity label
+ :param activity_definition: int, specific instance of behavior_activity init/final conditions
+ optional, randomly generated if not specified
+ :param predefined_problem: string, in format of a BEHAVIOR problem file read
+ """
+ super().__init__(
+ behavior_activity,
+ activity_definition=activity_definition,
+ scene_path=os.path.join(igibson.ig_dataset_path, "scenes"),
+ predefined_problem=predefined_problem,
+ )
+ self.state_history = {}
+ self.robot_type = robot_type
+ self.robot_config = robot_config
+
+ def initialize_simulator(
+ self,
+ simulator=None,
+ mode="headless",
+ scene_id=None,
+ scene_kwargs=None,
+ load_clutter=False,
+ should_debug_sampling=False,
+ online_sampling=True,
+ ):
+ """
+ Get scene populated with objects such that scene satisfies initial conditions
+ :param simulator: Simulator class, populated simulator that should completely
+ replace this function. Use if you would like to bypass internal
+ Simulator instantiation and population based on initial conditions
+ and use your own. Warning that if you use this option, we cannot
+ guarantee that the final conditions will be reachable.
+ """
+ # Set self.scene_name, self.scene, self.sampled_simulator_objects, and self.sampled_dsl_objects
+ if simulator is None:
+ self.simulator = Simulator(mode=mode, image_width=960, image_height=720, device_idx=0)
+ else:
+ self.simulator = simulator
+ self.load_clutter = load_clutter
+ self.should_debug_sampling = should_debug_sampling
+ if online_sampling:
+ scene_kwargs["merge_fixed_links"] = False
+ result = self.initialize(
+ InteractiveIndoorScene,
+ scene_id=scene_id,
+ scene_kwargs=scene_kwargs,
+ online_sampling=online_sampling,
+ )
+ self.initial_state = self.save_scene()
+ self.task_obs_dim = MAX_TASK_RELEVANT_OBJS * TASK_RELEVANT_OBJS_OBS_DIM + AGENT_POSE_DIM
+ return result
+
+ def save_scene(self):
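+ """Snapshot the current PyBullet state and the objects' internal (non-kinematic) states, and return the snapshot id."""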
+ snapshot_id = p.saveState()
+ self.state_history[snapshot_id] = save_internal_states(self.simulator)
+ return snapshot_id
+
+ def reset_scene(self, snapshot_id):
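+ """Restore a snapshot previously created by save_scene."""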
+ p.restoreState(snapshot_id)
+ load_internal_states(self.simulator, self.state_history[snapshot_id])
+
+ def check_scene(self):
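+ """Check that this activity can be set up in the current scene: validate room assignments for non-sampleable objects, build the sampling order, and import models for the sampleable objects. Returns (success, feedback)."""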
+ feedback = {"init_success": "yes", "goal_success": "untested", "init_feedback": "", "goal_feedback": ""}
+ self.newly_added_objects = set()
+ room_type_to_obj_inst = {}
+ self.non_sampleable_object_inst = set()
+ for cond in self.parsed_initial_conditions:
+ if cond[0] == "inroom":
+ obj_inst, room_type = cond[1], cond[2]
+ obj_cat = self.obj_inst_to_obj_cat[obj_inst]
+ if obj_cat not in NON_SAMPLEABLE_OBJECTS:
+ error_msg = "You have assigned room type for [{}], but [{}] is sampleable. Only non-sampleable objects can have room assignment.".format(
+ obj_cat, obj_cat
+ )
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+ # Room type missing in the scene
+ if room_type not in self.scene.room_sem_name_to_ins_name:
+ error_msg = "Room type [{}] missing in scene [{}].".format(room_type, self.scene.scene_id)
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+
+ if room_type not in room_type_to_obj_inst:
+ room_type_to_obj_inst[room_type] = []
+
+ room_type_to_obj_inst[room_type].append(obj_inst)
+ if obj_inst in self.non_sampleable_object_inst:
+ error_msg = "Object [{}] has more than one room assignment".format(obj_inst)
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+ self.non_sampleable_object_inst.add(obj_inst)
+
+ self.sampling_orders = []
+ cur_batch = self.non_sampleable_object_inst
+ while len(cur_batch) > 0:
+ self.sampling_orders.append(cur_batch)
+ next_batch = set()
+ for cond in self.parsed_initial_conditions:
+ if len(cond) == 3 and cond[2] in cur_batch:
+ next_batch.add(cond[1])
+ cur_batch = next_batch
+
+ if len(self.sampling_orders) > 0:
+ remaining_objs = self.object_scope.keys() - set.union(*self.sampling_orders)
+ else:
+ remaining_objs = self.object_scope.keys()
+
+ if len(remaining_objs) != 0:
+ error_msg = "Some objects do not have any kinematic condition defined for them in the initial conditions: {}".format(
+ ", ".join(remaining_objs)
+ )
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+
+ for obj_cat in self.objects:
+ if obj_cat not in NON_SAMPLEABLE_OBJECTS:
+ continue
+ for obj_inst in self.objects[obj_cat]:
+ if obj_inst not in self.non_sampleable_object_inst:
+ error_msg = (
+ "All non-sampleable objects should have room assignment. [{}] does not have one.".format(
+ obj_inst
+ )
+ )
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+
+ room_type_to_scene_objs = {}
+ for room_type in room_type_to_obj_inst:
+ room_type_to_scene_objs[room_type] = {}
+ for obj_inst in room_type_to_obj_inst[room_type]:
+ room_type_to_scene_objs[room_type][obj_inst] = {}
+ obj_cat = self.obj_inst_to_obj_cat[obj_inst]
+ # We allow burners to be used as if they are stoves
+ categories = self.object_taxonomy.get_subtree_igibson_categories(obj_cat)
+ if obj_cat == "stove.n.01":
+ categories += self.object_taxonomy.get_subtree_igibson_categories("burner.n.01")
+ for room_inst in self.scene.room_sem_name_to_ins_name[room_type]:
+ if obj_cat == FLOOR_SYNSET:
+ # TODO: remove after split floors
+ # Create a RoomFloor for each room instance
+ # This object is NOT imported by the simulator
+ room_floor = RoomFloor(
+ category="room_floor",
+ name="room_floor_{}".format(room_inst),
+ scene=self.scene,
+ room_instance=room_inst,
+ floor_obj=self.scene.objects_by_name["floors"],
+ )
+ scene_objs = [room_floor]
+ else:
+ room_objs = []
+ if room_inst in self.scene.objects_by_room:
+ room_objs = self.scene.objects_by_room[room_inst]
+ scene_objs = [obj for obj in room_objs if obj.category in categories]
+ if len(scene_objs) != 0:
+ room_type_to_scene_objs[room_type][obj_inst][room_inst] = scene_objs
+
+ # Store options for non-sampleable objects in self.non_sampleable_object_scope
+ # {
+ # "table1": {
+ # "living_room_0": [URDFObject, URDFObject, URDFObject],
+ # "living_room_1": [URDFObject]
+ # },
+ # "table2": {
+ # "living_room_0": [URDFObject, URDFObject],
+ # "living_room_1": [URDFObject, URDFObject]
+ # },
+ # "chair1": {
+ # "living_room_0": [URDFObject],
+ # "living_room_1": [URDFObject]
+ # },
+ # }
+ for room_type in room_type_to_scene_objs:
+ # For each room_type, filter in room_inst that has non-empty
+ # options for all obj_inst in this room_type
+ room_inst_satisfied = set.intersection(
+ *[
+ set(room_type_to_scene_objs[room_type][obj_inst].keys())
+ for obj_inst in room_type_to_scene_objs[room_type]
+ ]
+ )
+ if len(room_inst_satisfied) == 0:
+ error_msg = "Room type [{}] of scene [{}] does not contain all the objects needed.\nThe following are the possible room instances for each object, the intersection of which is an empty set.\n".format(
+ room_type, self.scene.scene_id
+ )
+ for obj_inst in room_type_to_scene_objs[room_type]:
+ error_msg += (
+ "{}: ".format(obj_inst) + ", ".join(room_type_to_scene_objs[room_type][obj_inst].keys()) + "\n"
+ )
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+
+ for obj_inst in room_type_to_scene_objs[room_type]:
+ room_type_to_scene_objs[room_type][obj_inst] = {
+ key: val
+ for key, val in room_type_to_scene_objs[room_type][obj_inst].items()
+ if key in room_inst_satisfied
+ }
+
+ self.non_sampleable_object_scope = room_type_to_scene_objs
+
+ num_new_obj = 0
+ # Only populate self.object_scope for sampleable objects
+ avg_category_spec = get_ig_avg_category_specs()
+ for obj_cat in self.objects:
+ if obj_cat == "agent.n.01":
+ continue
+ if obj_cat in NON_SAMPLEABLE_OBJECTS:
+ continue
+ is_sliceable = self.object_taxonomy.has_ability(obj_cat, "sliceable")
+ categories = self.object_taxonomy.get_subtree_igibson_categories(obj_cat)
+
+ # TODO: temporary hack
+ remove_categories = [
+ "pop_case", # too large
+ "jewel", # too small
+ "ring", # too small
+ ]
+ for remove_category in remove_categories:
+ if remove_category in categories:
+ categories.remove(remove_category)
+
+ if is_sliceable:
+ categories = [cat for cat in categories if "half_" not in cat]
+
+ for obj_inst in self.objects[obj_cat]:
+ category = np.random.choice(categories)
+ category_path = get_ig_category_path(category)
+ model_choices = os.listdir(category_path)
+
+ # Filter object models if the object category is openable
+ synset = self.object_taxonomy.get_class_name_from_igibson_category(category)
+ if self.object_taxonomy.has_ability(synset, "openable"):
+ model_choices = [m for m in model_choices if "articulated_" in m]
+ if len(model_choices) == 0:
+ error_msg = "{} is Openable, but does not have articulated models.".format(category)
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+
+ model = np.random.choice(model_choices)
+
+ # for "collecting aluminum cans", we need pop cans (not bottles)
+ if category == "pop" and self.behavior_activity in ["collecting_aluminum_cans"]:
+ model = np.random.choice([str(i) for i in range(40, 46)])
+ if category == "spoon" and self.behavior_activity in ["polishing_silver"]:
+ model = np.random.choice([str(i) for i in [2, 5, 6]])
+
+ model_path = get_ig_model_path(category, model)
+ filename = os.path.join(model_path, model + ".urdf")
+ obj_name = "{}_{}".format(category, len(self.scene.objects_by_name))
+ simulator_obj = URDFObject(
+ filename,
+ name=obj_name,
+ category=category,
+ model_path=model_path,
+ avg_obj_dims=avg_category_spec.get(category),
+ fit_avg_dim_volume=True,
+ texture_randomization=False,
+ overwrite_inertial=True,
+ initial_pos=[100 + num_new_obj, 100, -100],
+ )
+ num_new_obj += 1
+
+ if is_sliceable:
+ whole_object = simulator_obj
+ object_parts = []
+ assert "object_parts" in simulator_obj.metadata, "object_parts not found in metadata: [{}]".format(
+ model_path
+ )
+
+ for i, part in enumerate(simulator_obj.metadata["object_parts"]):
+ category = part["category"]
+ model = part["model"]
+ # Scale the offset accordingly
+ part_pos = part["pos"] * whole_object.scale
+ part_orn = part["orn"]
+ model_path = get_ig_model_path(category, model)
+ filename = os.path.join(model_path, model + ".urdf")
+ obj_name = whole_object.name + "_part_{}".format(i)
+ simulator_obj_part = URDFObject(
+ filename,
+ name=obj_name,
+ category=category,
+ model_path=model_path,
+ avg_obj_dims=avg_category_spec.get(category),
+ fit_avg_dim_volume=False,
+ scale=whole_object.scale,
+ texture_randomization=False,
+ overwrite_inertial=True,
+ initial_pos=[100 + num_new_obj, 100, -100],
+ )
+ num_new_obj += 1
+ object_parts.append((simulator_obj_part, (part_pos, part_orn)))
+
+ assert len(object_parts) > 0
+ grouped_obj_parts = ObjectGrouper(object_parts)
+ simulator_obj = ObjectMultiplexer(
+ whole_object.name + "_multiplexer", [whole_object, grouped_obj_parts], 0
+ )
+
+ if not self.scene.loaded:
+ self.scene.add_object(simulator_obj)
+ else:
+ self.simulator.import_object(simulator_obj)
+ self.newly_added_objects.add(simulator_obj)
+ self.object_scope[obj_inst] = simulator_obj
+
+ return True, feedback
+
+ def import_agent(self, use_ghost_hands=False):
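+ """Import the robot (BehaviorRobot or FetchGripper) into the simulator and, when a cached pose is available, restore its initial pose from the scene."""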
+ cached_initial_pose = not self.online_sampling and self.scene.agent != {}
+ if self.robot_type == BehaviorRobot:
+ agent = BehaviorRobot(self.simulator)
+ self.simulator.import_behavior_robot(agent)
+ agent.set_position_orientation([300, 300, 300], [0, 0, 0, 1])
+ self.object_scope["agent.n.01_1"] = agent.parts["body"]
+ if cached_initial_pose:
+ agent.parts["body"].set_base_link_position_orientation(
+ self.scene.agent["BRBody_1"]["xyz"], quat_from_euler(self.scene.agent["BRBody_1"]["rpy"])
+ )
+ agent.parts["left_hand"].set_base_link_position_orientation(
+ self.scene.agent["left_hand_1"]["xyz"], quat_from_euler(self.scene.agent["left_hand_1"]["rpy"])
+ )
+ agent.parts["right_hand"].set_base_link_position_orientation(
+ self.scene.agent["right_hand_1"]["xyz"], quat_from_euler(self.scene.agent["right_hand_1"]["rpy"])
+ )
+ if use_ghost_hands:
+ agent.parts["left_hand"].ghost_hand.set_base_link_position_orientation(
+ self.scene.agent["left_hand_1"]["xyz"], quat_from_euler(self.scene.agent["left_hand_1"]["rpy"])
+ )
+ agent.parts["right_hand"].ghost_hand.set_base_link_position_orientation(
+ self.scene.agent["right_hand_1"]["xyz"], quat_from_euler(self.scene.agent["right_hand_1"]["rpy"])
+ )
+ agent.parts["eye"].set_base_link_position_orientation(
+ self.scene.agent["BREye_1"]["xyz"], quat_from_euler(self.scene.agent["BREye_1"]["rpy"])
+ )
+ elif self.robot_type == FetchGripper:
+ agent = FetchGripper(self.simulator, self.robot_config)
+ self.simulator.import_robot(agent)
+ agent.set_position_orientation([300, 300, 300], [0, 0, 0, 1])
+ self.object_scope["agent.n.01_1"] = agent
+ if cached_initial_pose:
+ # Use the cached pose of BehaviorRobot to initialize FetchGripper
+ pos = np.copy(self.scene.agent["BRBody_1"]["xyz"])
+ pos[2] = 0.0
+ pos[2] = stable_z_on_aabb(agent.get_body_id(), [pos, pos])
+ agent.set_position_orientation(pos, quat_from_euler(self.scene.agent["BRBody_1"]["rpy"]))
+ else:
+ raise Exception("Only BehaviorRobot and FetchGripper are supported")
+
+ agent.robot_specific_reset()
+ self.simulator.register_main_vr_robot(agent)
+ assert len(self.simulator.robots) == 1, "Error, multiple agents is not currently supported"
+
+ def move_agent(self):
+ """
+ Backwards compatibility, to be deprecated
+ """
+ pass
+
+ def import_scene(self):
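+ """Reload the simulator, import the interactive scene, and bind each BDDL object instance in the object scope to its matching scene object."""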
+ self.simulator.reload()
+ self.simulator.import_ig_scene(self.scene)
+
+ # if not self.online_sampling:
+ for obj_inst in self.object_scope:
+ matched_sim_obj = None
+
+ # TODO: remove after split floors
+ if "floor.n.01" in obj_inst:
+ for _, sim_obj in self.scene.objects_by_name.items():
+ if sim_obj.bddl_object_scope is not None and obj_inst in sim_obj.bddl_object_scope:
+ bddl_object_scope = sim_obj.bddl_object_scope.split(",")
+ bddl_object_scope = {item.split(":")[0]: item.split(":")[1] for item in bddl_object_scope}
+ assert obj_inst in bddl_object_scope
+ room_inst = bddl_object_scope[obj_inst].replace("room_floor_", "")
+ matched_sim_obj = RoomFloor(
+ category="room_floor",
+ name=bddl_object_scope[obj_inst],
+ scene=self.scene,
+ room_instance=room_inst,
+ floor_obj=self.scene.objects_by_name["floors"],
+ )
+ elif obj_inst == "agent.n.01_1":
+ # Skip adding agent to object scope, handled later by import_agent()
+ continue
+ else:
+ for _, sim_obj in self.scene.objects_by_name.items():
+ if sim_obj.bddl_object_scope == obj_inst:
+ matched_sim_obj = sim_obj
+ break
+
+ assert matched_sim_obj is not None, obj_inst
+ self.object_scope[obj_inst] = matched_sim_obj
+
+ def sample(self, kinematic_only=False):
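+ """Sample object poses/states so that the initial conditions (and one set of ground goal conditions, where possible) are satisfied. Returns (success, feedback)."""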
+ feedback = {"init_success": "yes", "goal_success": "yes", "init_feedback": "", "goal_feedback": ""}
+ non_sampleable_obj_conditions = []
+ sampleable_obj_conditions = []
+
+ # TODO: currently we assume self.initial_conditions is a list of
+ # bddl.condition_evaluation.HEAD, each with one child.
+ # This child is either an ObjectStateUnaryPredicate/ObjectStateBinaryPredicate or
+ # a Negation of a ObjectStateUnaryPredicate/ObjectStateBinaryPredicate
+ for condition in self.initial_conditions:
+ if not isinstance(condition.children[0], Negation) and not isinstance(condition.children[0], AtomicFormula):
+ logging.warning(("Skipping over sampling of predicate that is not a negation or an atomic formula"))
+ continue
+ if kinematic_only:
+ if isinstance(condition.children[0], Negation):
+ if condition.children[0].children[0].STATE_NAME not in KINEMATICS_STATES:
+ continue
+ else:
+ if condition.children[0].STATE_NAME not in KINEMATICS_STATES:
+ continue
+ if isinstance(condition.children[0], Negation):
+ condition = condition.children[0].children[0]
+ positive = False
+ else:
+ condition = condition.children[0]
+ positive = True
+ condition_body = set(condition.body)
+ if len(self.non_sampleable_object_inst.intersection(condition_body)) > 0:
+ non_sampleable_obj_conditions.append((condition, positive))
+ else:
+ sampleable_obj_conditions.append((condition, positive))
+
+ # First, try to fulfill the initial conditions that involve non-sampleable objects
+ # Filter in all simulator objects that allow successful sampling for each object inst
+ init_sampling_log = []
+ scene_object_scope_filtered = {}
+ for room_type in self.non_sampleable_object_scope:
+ scene_object_scope_filtered[room_type] = {}
+ for scene_obj in self.non_sampleable_object_scope[room_type]:
+ scene_object_scope_filtered[room_type][scene_obj] = {}
+ for room_inst in self.non_sampleable_object_scope[room_type][scene_obj]:
+ for obj in self.non_sampleable_object_scope[room_type][scene_obj][room_inst]:
+ self.object_scope[scene_obj] = obj
+
+ success = True
+ # If this object is not involved in any initial conditions,
+ # success will be True by default and any simulator obj will qualify
+ for condition, positive in non_sampleable_obj_conditions:
+ # Always skip non-kinematic state sampling. Only do so after the object scope has been finalized
+ if condition.STATE_NAME not in KINEMATICS_STATES:
+ continue
+ # Only sample conditions that involve this object
+ if scene_obj not in condition.body:
+ continue
+ success = condition.sample(binary_state=positive)
+ log_msg = " ".join(
+ [
+ "initial condition sampling",
+ room_type,
+ scene_obj,
+ room_inst,
+ obj.name,
+ condition.STATE_NAME,
+ str(condition.body),
+ str(success),
+ ]
+ )
+ logging.warning((log_msg))
+ init_sampling_log.append(log_msg)
+
+ if not success:
+ break
+
+ if not success:
+ continue
+
+ if room_inst not in scene_object_scope_filtered[room_type][scene_obj]:
+ scene_object_scope_filtered[room_type][scene_obj][room_inst] = []
+ scene_object_scope_filtered[room_type][scene_obj][room_inst].append(obj)
+
+ for room_type in scene_object_scope_filtered:
+ # For each room_type, filter in room_inst that has successful
+ # sampling options for all obj_inst in this room_type
+ room_inst_satisfied = set.intersection(
+ *[
+ set(scene_object_scope_filtered[room_type][obj_inst].keys())
+ for obj_inst in scene_object_scope_filtered[room_type]
+ ]
+ )
+
+ if len(room_inst_satisfied) == 0:
+ error_msg = "Room type [{}] of scene [{}] cannot sample all the objects needed.\nThe following are the possible room instances for each object, the intersection of which is an empty set.\n".format(
+ room_type, self.scene.scene_id
+ )
+ for obj_inst in scene_object_scope_filtered[room_type]:
+ error_msg += (
+ "{}: ".format(obj_inst)
+ + ", ".join(scene_object_scope_filtered[room_type][obj_inst].keys())
+ + "\n"
+ )
+ error_msg += "The following are the initial condition sampling history:\n"
+ error_msg += "\n".join(init_sampling_log)
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["goal_success"] = "untested"
+ feedback["init_feedback"] = error_msg
+
+ if self.should_debug_sampling:
+ self.debug_sampling(scene_object_scope_filtered, non_sampleable_obj_conditions)
+ return False, feedback
+
+ for obj_inst in scene_object_scope_filtered[room_type]:
+ scene_object_scope_filtered[room_type][obj_inst] = {
+ key: val
+ for key, val in scene_object_scope_filtered[room_type][obj_inst].items()
+ if key in room_inst_satisfied
+ }
+
+ # For each room instance, perform maximum bipartite matching between object instance in scope to simulator objects
+ # Left nodes: a list of object instance in scope
+ # Right nodes: a list of simulator objects
+ # Edges: if the simulator object can support the sampling requirement of this object instance
+ for room_type in scene_object_scope_filtered:
+ # The same room instances will be shared across all scene obj in a given room type
+ some_obj = list(scene_object_scope_filtered[room_type].keys())[0]
+ room_insts = list(scene_object_scope_filtered[room_type][some_obj].keys())
+ success = False
+ init_mbm_log = []
+ # Loop through each room instance
+ for room_inst in room_insts:
+ graph = nx.Graph()
+ # For this given room instance, gather the mapping from obj instance to a list of simulator obj
+ obj_inst_to_obj_per_room_inst = {}
+ for obj_inst in scene_object_scope_filtered[room_type]:
+ obj_inst_to_obj_per_room_inst[obj_inst] = scene_object_scope_filtered[room_type][obj_inst][
+ room_inst
+ ]
+ top_nodes = []
+ log_msg = "MBM for room instance [{}]".format(room_inst)
+ logging.warning((log_msg))
+ init_mbm_log.append(log_msg)
+ for obj_inst in obj_inst_to_obj_per_room_inst:
+ for obj in obj_inst_to_obj_per_room_inst[obj_inst]:
+ # Create an edge between obj instance and each of the simulator obj that supports sampling
+ graph.add_edge(obj_inst, obj)
+ log_msg = "Adding edge: {} <-> {}".format(obj_inst, obj.name)
+ logging.warning((log_msg))
+ init_mbm_log.append(log_msg)
+ top_nodes.append(obj_inst)
+ # Need to provide top_nodes that contain all nodes in one bipartite node set
+ # The matches will have two items for each match (e.g. A -> B, B -> A)
+ matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes)
+ if len(matches) == 2 * len(obj_inst_to_obj_per_room_inst):
+ logging.warning(("Object scope finalized:"))
+ for obj_inst, obj in matches.items():
+ if obj_inst in obj_inst_to_obj_per_room_inst:
+ self.object_scope[obj_inst] = obj
+ logging.warning((obj_inst, obj.name))
+ success = True
+ break
+ if not success:
+ error_msg = "Room type [{}] of scene [{}] do not have enough simulator objects that can successfully sample all the objects needed. This is usually caused by specifying too many object instances in the object scope or the conditions are so stringent that too few simulator objects can satisfy them via sampling.\n".format(
+ room_type, self.scene.scene_id
+ )
+ error_msg += "The following are the initial condition matching history:\n"
+ error_msg += "\n".join(init_mbm_log)
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["goal_success"] = "untested"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+
+ np.random.shuffle(self.ground_goal_state_options)
+ logging.warning(("number of ground_goal_state_options", len(self.ground_goal_state_options)))
+ num_goal_condition_set_to_test = 10
+
+ goal_sampling_error_msgs = []
+ # Next, try to fulfill different set of ground goal conditions (maximum num_goal_condition_set_to_test)
+ for goal_condition_set in self.ground_goal_state_options[:num_goal_condition_set_to_test]:
+ goal_condition_set_success = True
+ goal_sampling_log = []
+ # Try to fulfill the current set of ground goal conditions
+ scene_object_scope_filtered_goal_cond = {}
+ for room_type in scene_object_scope_filtered:
+ scene_object_scope_filtered_goal_cond[room_type] = {}
+ for scene_obj in scene_object_scope_filtered[room_type]:
+ scene_object_scope_filtered_goal_cond[room_type][scene_obj] = {}
+ for room_inst in scene_object_scope_filtered[room_type][scene_obj]:
+ for obj in scene_object_scope_filtered[room_type][scene_obj][room_inst]:
+ self.object_scope[scene_obj] = obj
+
+ success = True
+ for goal_condition in goal_condition_set:
+ goal_condition = goal_condition.children[0]
+ # do not sample negative goal condition
+ if isinstance(goal_condition, Negation):
+ continue
+ # only sample kinematic goal condition
+ if goal_condition.STATE_NAME not in KINEMATICS_STATES:
+ continue
+ if scene_obj not in goal_condition.body:
+ continue
+ success = goal_condition.sample(binary_state=True)
+ log_msg = " ".join(
+ [
+ "goal condition sampling",
+ room_type,
+ scene_obj,
+ room_inst,
+ obj.name,
+ goal_condition.STATE_NAME,
+ str(goal_condition.body),
+ str(success),
+ ]
+ )
+ logging.warning((log_msg))
+ goal_sampling_log.append(log_msg)
+ if not success:
+ break
+ if not success:
+ continue
+
+ if room_inst not in scene_object_scope_filtered_goal_cond[room_type][scene_obj]:
+ scene_object_scope_filtered_goal_cond[room_type][scene_obj][room_inst] = []
+ scene_object_scope_filtered_goal_cond[room_type][scene_obj][room_inst].append(obj)
+
+ for room_type in scene_object_scope_filtered_goal_cond:
+ # For each room_type, filter in room_inst that has successful
+ # sampling options for all obj_inst in this room_type
+ room_inst_satisfied = set.intersection(
+ *[
+ set(scene_object_scope_filtered_goal_cond[room_type][obj_inst].keys())
+ for obj_inst in scene_object_scope_filtered_goal_cond[room_type]
+ ]
+ )
+
+ if len(room_inst_satisfied) == 0:
+ error_msg = "Room type [{}] of scene [{}] cannot sample all the objects needed.\nThe following are the possible room instances for each object, the intersection of which is an empty set.\n".format(
+ room_type, self.scene.scene_id
+ )
+ for obj_inst in scene_object_scope_filtered_goal_cond[room_type]:
+ error_msg += (
+ "{}: ".format(obj_inst)
+ + ", ".join(scene_object_scope_filtered_goal_cond[room_type][obj_inst].keys())
+ + "\n"
+ )
+ error_msg += "The following are the goal condition sampling history:\n"
+ error_msg += "\n".join(goal_sampling_log)
+ logging.warning(error_msg)
+ goal_sampling_error_msgs.append(error_msg)
+ if self.should_debug_sampling:
+ self.debug_sampling(
+ scene_object_scope_filtered_goal_cond, non_sampleable_obj_conditions, goal_condition_set
+ )
+ goal_condition_set_success = False
+ break
+
+ for obj_inst in scene_object_scope_filtered_goal_cond[room_type]:
+ scene_object_scope_filtered_goal_cond[room_type][obj_inst] = {
+ key: val
+ for key, val in scene_object_scope_filtered_goal_cond[room_type][obj_inst].items()
+ if key in room_inst_satisfied
+ }
+
+ if not goal_condition_set_success:
+ continue
+ # For each room instance, perform maximum bipartite matching between object instance in scope to simulator objects
+ # Left nodes: a list of object instance in scope
+ # Right nodes: a list of simulator objects
+ # Edges: if the simulator object can support the sampling requirement of this object instance
+ for room_type in scene_object_scope_filtered_goal_cond:
+ # The same room instances will be shared across all scene obj in a given room type
+ some_obj = list(scene_object_scope_filtered_goal_cond[room_type].keys())[0]
+ room_insts = list(scene_object_scope_filtered_goal_cond[room_type][some_obj].keys())
+ success = False
+ goal_mbm_log = []
+ # Loop through each room instance
+ for room_inst in room_insts:
+ graph = nx.Graph()
+ # For this given room instance, gather the mapping from obj instance to a list of simulator obj
+ obj_inst_to_obj_per_room_inst = {}
+ for obj_inst in scene_object_scope_filtered_goal_cond[room_type]:
+ obj_inst_to_obj_per_room_inst[obj_inst] = scene_object_scope_filtered_goal_cond[room_type][
+ obj_inst
+ ][room_inst]
+ top_nodes = []
+ log_msg = "MBM for room instance [{}]".format(room_inst)
+ logging.warning((log_msg))
+ goal_mbm_log.append(log_msg)
+ for obj_inst in obj_inst_to_obj_per_room_inst:
+ for obj in obj_inst_to_obj_per_room_inst[obj_inst]:
+ # Create an edge between obj instance and each of the simulator obj that supports sampling
+ graph.add_edge(obj_inst, obj)
+ log_msg = "Adding edge: {} <-> {}".format(obj_inst, obj.name)
+ logging.warning((log_msg))
+ goal_mbm_log.append(log_msg)
+ top_nodes.append(obj_inst)
+ # Need to provide top_nodes that contain all nodes in one bipartite node set
+ # The matches will have two items for each match (e.g. A -> B, B -> A)
+ matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes)
+ if len(matches) == 2 * len(obj_inst_to_obj_per_room_inst):
+ logging.warning(("Object scope finalized:"))
+ for obj_inst, obj in matches.items():
+ if obj_inst in obj_inst_to_obj_per_room_inst:
+ self.object_scope[obj_inst] = obj
+ logging.warning((obj_inst, obj.name))
+ success = True
+ break
+ if not success:
+ error_msg = "Room type [{}] of scene [{}] do not have enough simulator objects that can successfully sample all the objects needed. This is usually caused by specifying too many object instances in the object scope or the conditions are so stringent that too few simulator objects can satisfy them via sampling.\n".format(
+ room_type, self.scene.scene_id
+ )
+ error_msg += "The following are the goal condition matching history:\n"
+ error_msg += "\n".join(goal_mbm_log)
+ logging.warning(error_msg)
+ goal_sampling_error_msgs.append(error_msg)
+ goal_condition_set_success = False
+ break
+
+ if not goal_condition_set_success:
+ continue
+
+ # if one set of goal conditions (and initial conditions) are satisfied, sampling is successful
+ break
+
+ if not goal_condition_set_success:
+ goal_sampling_error_msg_compiled = ""
+ for i, log_msg in enumerate(goal_sampling_error_msgs):
+ goal_sampling_error_msg_compiled += "-" * 30 + "\n"
+ goal_sampling_error_msg_compiled += "Ground condition set #{}/{}:\n".format(
+ i + 1, len(goal_sampling_error_msgs)
+ )
+ goal_sampling_error_msg_compiled += log_msg + "\n"
+ feedback["goal_success"] = "no"
+ feedback["goal_feedback"] = goal_sampling_error_msg_compiled
+ return False, feedback
+
+ # Do sampling again using the object instance -> simulator object mapping from maximum bipartite matching
+ for condition, positive in non_sampleable_obj_conditions:
+ num_trials = 10
+ for _ in range(num_trials):
+ success = condition.sample(binary_state=positive)
+ if success:
+ break
+ if not success:
+ logging.warning(
+ "Non-sampleable object conditions failed even after successful matching: {}".format(condition.body)
+ )
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = "Please run test sampling again."
+ return False, feedback
+
+ # Use ray casting for ontop and inside sampling for non-sampleable objects
+ for condition, positive in sampleable_obj_conditions:
+ if condition.STATE_NAME in ["inside", "ontop"]:
+ condition.kwargs["use_ray_casting_method"] = True
+
+ if len(self.sampling_orders) > 0:
+ # Pop non-sampleable objects
+ self.sampling_orders.pop(0)
+ for cur_batch in self.sampling_orders:
+ # First sample non-sliced conditions
+ for condition, positive in sampleable_obj_conditions:
+ if condition.STATE_NAME == "sliced":
+ continue
+ # Sample conditions that involve the current batch of objects
+ if condition.body[0] in cur_batch:
+ num_trials = 100
+ for _ in range(num_trials):
+ success = condition.sample(binary_state=positive)
+ if success:
+ break
+ if not success:
+ error_msg = "Sampleable object conditions failed: {} {}".format(
+ condition.STATE_NAME, condition.body
+ )
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+
+ # Then sample sliced conditions
+ for condition, positive in sampleable_obj_conditions:
+ if condition.STATE_NAME != "sliced":
+ continue
+ # Sample conditions that involve the current batch of objects
+ if condition.body[0] in cur_batch:
+ success = condition.sample(binary_state=positive)
+ if not success:
+ error_msg = "Sampleable object conditions failed: {}".format(condition.body)
+ logging.warning(error_msg)
+ feedback["init_success"] = "no"
+ feedback["init_feedback"] = error_msg
+ return False, feedback
+
+ return True, feedback
+
+ def debug_sampling(self, scene_object_scope_filtered, non_sampleable_obj_conditions, goal_condition_set=None):
+ igibson.debug_sampling = True
+ for room_type in self.non_sampleable_object_scope:
+ for scene_obj in self.non_sampleable_object_scope[room_type]:
+ if len(scene_object_scope_filtered[room_type][scene_obj].keys()) != 0:
+ continue
+ for room_inst in self.non_sampleable_object_scope[room_type][scene_obj]:
+ for obj in self.non_sampleable_object_scope[room_type][scene_obj][room_inst]:
+ self.object_scope[scene_obj] = obj
+
+ for condition, positive in non_sampleable_obj_conditions:
+ # Only sample conditions that involve this object
+ if scene_obj not in condition.body:
+ continue
+ print(
+ "debug initial condition sampling",
+ room_type,
+ scene_obj,
+ room_inst,
+ obj.name,
+ condition.STATE_NAME,
+ condition.body,
+ )
+ obj_pos = obj.get_position()
+ # Set the pybullet camera to have a bird's eye view
+ # of the sampling process
+ p.resetDebugVisualizerCamera(
+ cameraDistance=3.0, cameraYaw=0, cameraPitch=-89.99999, cameraTargetPosition=obj_pos
+ )
+ success = condition.sample(binary_state=positive)
+ print("success", success)
+
+ if goal_condition_set is None:
+ continue
+
+ for goal_condition in goal_condition_set:
+ goal_condition = goal_condition.children[0]
+ if isinstance(goal_condition, Negation):
+ continue
+ if goal_condition.STATE_NAME not in KINEMATICS_STATES:
+ continue
+ if scene_obj not in goal_condition.body:
+ continue
+ print(
+ "goal condition sampling",
+ room_type,
+ scene_obj,
+ room_inst,
+ obj.name,
+ goal_condition.STATE_NAME,
+ goal_condition.body,
+ )
+ obj_pos = obj.get_position()
+ # Set the pybullet camera to have a bird's eye view
+ # of the sampling process
+ p.resetDebugVisualizerCamera(
+ cameraDistance=3.0, cameraYaw=0, cameraPitch=-89.99999, cameraTargetPosition=obj_pos
+ )
+ success = goal_condition.sample(binary_state=True)
+ print("success", success)
+
+ def clutter_scene(self):
+ if not self.load_clutter:
+ return
+
+ scene_id = self.scene.scene_id
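+ # Randomly pick one of the precomputed clutter variants for this scene ("" loads "<scene_id>_clutter", 2-4 load "<scene_id>_clutter{2,3,4}")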
+ clutter_ids = [""] + list(range(2, 5))
+ clutter_id = np.random.choice(clutter_ids)
+ clutter_scene = InteractiveIndoorScene(scene_id, "{}_clutter{}".format(scene_id, clutter_id))
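+ # Import only clutter objects that stay clear of the task-relevant objects; floor objects are excluded from the collision check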
+ existing_objects = [value for key, value in self.object_scope.items() if "floor.n.01" not in key]
+ self.simulator.import_non_colliding_objects(
+ objects=clutter_scene.objects_by_name, existing_objects=existing_objects, min_distance=0.5
+ )
+
+ def get_task_obs(self, env):
+ state = OrderedDict()
+ task_obs = np.zeros((self.task_obs_dim))
+ state["robot_pos"] = np.array(env.robots[0].get_position())
+ state["robot_orn"] = np.array(env.robots[0].get_rpy())
+
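+ # Append position, orientation, and per-gripper grasping flags for every URDF object in the object scope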
+ i = 0
+ for _, v in self.object_scope.items():
+ if isinstance(v, URDFObject):
+ state["obj_{}_valid".format(i)] = 1.0
+ state["obj_{}_pos".format(i)] = np.array(v.get_position())
+ state["obj_{}_orn".format(i)] = np.array(p.getEulerFromQuaternion(v.get_orientation()))
+ grasping_objects = env.robots[0].is_grasping(v.get_body_id())
+ for grasp_idx, grasping in enumerate(grasping_objects):
+ state["obj_{}_pos_in_gripper_{}".format(i, grasp_idx)] = float(grasping)
+ i += 1
+
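+ # Flatten the ordered dict into a fixed-length vector, zero-padded up to task_obs_dim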
+ state_list = []
+ for k, v in state.items():
+ if isinstance(v, list):
+ state_list.extend(v)
+ elif isinstance(v, tuple):
+ state_list.extend(list(v))
+ elif isinstance(v, np.ndarray):
+ state_list.extend(list(v))
+ elif isinstance(v, (float, int)):
+ state_list.append(v)
+ else:
+ raise ValueError("cannot serialize task obs")
+
+ assert len(state_list) <= len(task_obs)
+ task_obs[: len(state_list)] = state_list
+
+ return task_obs
+
+
+def main():
+ igbhvr_act_inst = iGBEHAVIORActivityInstance("assembling_gift_baskets", 0)
+ igbhvr_act_inst.initialize_simulator(mode="headless", scene_id="Rs_int")
+
+ for i in range(500):
+ igbhvr_act_inst.simulator.step()
+ success, failed_conditions = igbhvr_act_inst.check_success()
+ print("ACTIVITY SUCCESS:", success)
+ if not success:
+ print("FAILED CONDITIONS:", failed_conditions)
+ igbhvr_act_inst.simulator.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/tasks/bddl_backend.py b/igibson/activity/bddl_backend.py
similarity index 100%
rename from igibson/tasks/bddl_backend.py
rename to igibson/activity/bddl_backend.py
diff --git a/igibson/utils/data_utils/ext_object/scripts_wip/get_obj_shelf_heights.py b/igibson/activity/get_obj_shelf_heights.py
similarity index 98%
rename from igibson/utils/data_utils/ext_object/scripts_wip/get_obj_shelf_heights.py
rename to igibson/activity/get_obj_shelf_heights.py
index 4a5f0f04c..039a5e15e 100644
--- a/igibson/utils/data_utils/ext_object/scripts_wip/get_obj_shelf_heights.py
+++ b/igibson/activity/get_obj_shelf_heights.py
@@ -29,8 +29,7 @@
out_dict = {}
obj = ArticulatedObject(filename=args.object_file, scale=1.0)
- simulator.import_object(obj)
- body_id = obj.get_body_id()
+ body_id = simulator.import_object(obj)
aabb = pb.getAABB(body_id)
size = [aabb[1][0] - aabb[0][0], aabb[1][1] - aabb[0][1], aabb[1][2] - aabb[0][2]]
diff --git a/igibson/utils/data_utils/sampling_task/batch_sampling_saver.py b/igibson/activity/tests/batch_sampling_saver.py
similarity index 100%
rename from igibson/utils/data_utils/sampling_task/batch_sampling_saver.py
rename to igibson/activity/tests/batch_sampling_saver.py
diff --git a/igibson/activity/tests/sampling_loader.py b/igibson/activity/tests/sampling_loader.py
new file mode 100644
index 000000000..fa07b3878
--- /dev/null
+++ b/igibson/activity/tests/sampling_loader.py
@@ -0,0 +1,50 @@
+import bddl
+from IPython import embed
+
+from igibson.activity.activity_base import iGBEHAVIORActivityInstance
+from igibson.simulator import Simulator
+
+bddl.set_backend("iGibson")
+
+task_choices = [
+ "packing_lunches_filtered",
+ "assembling_gift_baskets_filtered",
+ "organizing_school_stuff_filtered",
+ "re-shelving_library_books_filtered",
+ "serving_hors_d_oeuvres_filtered",
+ "putting_away_toys_filtered",
+ "putting_away_Christmas_decorations_filtered",
+ "putting_dishes_away_after_cleaning_filtered",
+ "cleaning_out_drawers_filtered",
+]
+task = "assembling_gift_baskets"
+task_id = 0
+scene = "Rs_int"
+num_init = 0
+
+igbhvr_act_inst = iGBEHAVIORActivityInstance(task, activity_definition=task_id)
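+# Load the cached task URDF produced by sampling_saver for this (scene, task, task_id, init) combination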
+scene_kwargs = {
+ # 'load_object_categories': ['oven', 'fridge', 'countertop', 'cherry', 'sausage', 'tray'],
+ "not_load_object_categories": ["ceilings"],
+ "urdf_file": "{}_task_{}_{}_{}".format(scene, task, task_id, num_init),
+}
+simulator = Simulator(mode="headless", image_width=960, image_height=720)
+init_success = igbhvr_act_inst.initialize_simulator(
+ scene_id=scene,
+ simulator=simulator,
+ load_clutter=True,
+ should_debug_sampling=False,
+ scene_kwargs=scene_kwargs,
+ online_sampling=False,
+)
+print("success")
+embed()
+
+while True:
+ igbhvr_act_inst.simulator.step()
+ success, sorted_conditions = igbhvr_act_inst.check_success()
+ print("TASK SUCCESS:", success)
+ if not success:
+ print("FAILED CONDITIONS:", sorted_conditions["unsatisfied"])
+ else:
+ pass
diff --git a/igibson/utils/data_utils/sampling_task/sampling_saver.py b/igibson/activity/tests/sampling_saver.py
similarity index 64%
rename from igibson/utils/data_utils/sampling_task/sampling_saver.py
rename to igibson/activity/tests/sampling_saver.py
index 9bb9dc001..ecf93d30a 100644
--- a/igibson/utils/data_utils/sampling_task/sampling_saver.py
+++ b/igibson/activity/tests/sampling_saver.py
@@ -5,12 +5,14 @@
import bddl
import pybullet as p
-from IPython import embed
import igibson
-from igibson.envs.igibson_env import iGibsonEnv
+from igibson.activity.activity_base import iGBEHAVIORActivityInstance
from igibson.simulator import Simulator
-from igibson.utils.utils import parse_config, restoreState
+
+PARTIAL_RECACHE = {
+ # 'sorting_books': ['Ihlen_0_int'],
+}
def parse_args():
@@ -19,7 +21,6 @@ def parse_args():
"--task", type=str, required=True, help="Name of ATUS task matching BDDL parent folder in bddl."
)
parser.add_argument("--task_id", type=int, required=True, help="BDDL integer ID, matching suffix of bddl.")
- parser.add_argument("--scenes", type=str, nargs="+", help="A list of scenes to sample the BDDL description.")
parser.add_argument("--max_trials", type=int, default=1, help="Maximum number of trials to try sampling.")
parser.add_argument(
"--num_initializations", type=int, default=1, help="Number of initialization per BDDL per scene."
@@ -28,11 +29,19 @@ def parse_args():
return parser.parse_args()
+def remove_newly_added_objects(igbhvr_act_inst, state_id):
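+ # Remove simulator objects that were added for this sampling attempt and restore the saved pybullet state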
+ for sim_obj in igbhvr_act_inst.newly_added_objects:
+ igbhvr_act_inst.scene.remove_object(sim_obj)
+ for id in sim_obj.body_ids:
+ p.removeBody(id)
+ p.restoreState(state_id)
+
+
def main():
args = parse_args()
+ bddl.set_backend("iGibson")
task = args.task
task_id = args.task_id
- scenes = args.scenes
start_initialization = args.start_initialization
logging.warning("TASK: {}".format(task))
logging.warning("TASK ID: {}".format(task_id))
@@ -42,53 +51,48 @@ def main():
with open(scene_json) as f:
activity_to_scenes = json.load(f)
- if scenes is not None:
- scene_choices = scenes
- elif task in activity_to_scenes:
- scene_choices = activity_to_scenes[task]
- else:
- scene_choices = [item for item in get_available_ig_scenes() if item.endswith("_int")]
+ if task not in activity_to_scenes:
+ return
+
+ scene_choices = activity_to_scenes[task]
+ if task in PARTIAL_RECACHE:
+ scene_choices = PARTIAL_RECACHE[task]
+ # scene_choices = ['Rs_int']
logging.warning(("SCENE CHOICES", scene_choices))
num_initializations = args.num_initializations
num_trials = args.max_trials
-
- config_file = os.path.join(igibson.example_config_path, "behavior_vr.yaml")
- env_config = parse_config(config_file)
- env_config["task"] = task
- env_config["task_id"] = task_id
- env_config["online_sampling"] = True
- env_config["load_clutter"] = True
-
+ simulator = Simulator(mode="headless", image_width=960, image_height=720, device_idx=0)
+ scene_kwargs = {}
+ igbhvr_act_inst = iGBEHAVIORActivityInstance(task, activity_definition=task_id)
for scene_id in scene_choices:
logging.warning(("TRY SCENE:", scene_id))
- env_config["scene_id"] = scene_id
+
for init_id in range(start_initialization, start_initialization + num_initializations):
urdf_path = "{}_task_{}_{}_{}".format(scene_id, task, task_id, init_id)
full_path = os.path.join(igibson.ig_dataset_path, "scenes", scene_id, "urdf", urdf_path + ".urdf")
if os.path.isfile(full_path):
logging.warning("Already cached: {}".format(full_path))
continue
-
for _ in range(num_trials):
- env = iGibsonEnv(
- config_file=env_config,
+ success = igbhvr_act_inst.initialize_simulator(
+ simulator=simulator,
+ scene_id=scene_id,
mode="headless",
+ load_clutter=True,
+ should_debug_sampling=False,
+ scene_kwargs=scene_kwargs,
+ online_sampling=True,
)
- success = env.task.initialized
if success:
break
- else:
- env.close()
if success:
sim_obj_to_bddl_obj = {
- value.name: {"object_scope": key} for key, value in env.task.object_scope.items()
+ value.name: {"object_scope": key} for key, value in igbhvr_act_inst.object_scope.items()
}
- env.scene.save_modified_urdf(urdf_path, sim_obj_to_bddl_obj)
+ igbhvr_act_inst.scene.save_modified_urdf(urdf_path, sim_obj_to_bddl_obj)
logging.warning(("Saved:", urdf_path))
- env.close()
- embed()
if __name__ == "__main__":
diff --git a/igibson/activity/tests/sampling_test.py b/igibson/activity/tests/sampling_test.py
new file mode 100644
index 000000000..b4d7764a9
--- /dev/null
+++ b/igibson/activity/tests/sampling_test.py
@@ -0,0 +1,33 @@
+import bddl
+from IPython import embed
+
+from igibson.activity.activity_base import iGBEHAVIORActivityInstance
+from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
+from igibson.simulator import Simulator
+
+bddl.set_backend("iGibson")
+
+activity = "assembling_gift_baskets"
+scene_id = "Rs_int"
+
+igbhvr_act_inst = iGBEHAVIORActivityInstance(activity, activity_definition=0)
+scene_kwargs = {
+ "not_load_object_categories": ["ceilings"],
+}
+settings = MeshRendererSettings(texture_scale=1)
+simulator = Simulator(mode="headless", image_width=960, image_height=720, rendering_settings=settings)
+init_success = igbhvr_act_inst.initialize_simulator(
+ scene_id=scene_id, simulator=simulator, load_clutter=False, should_debug_sampling=True, scene_kwargs=scene_kwargs
+)
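+# should_debug_sampling=True enables the bird's-eye pybullet debug visualization implemented in debug_sampling()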
+assert init_success
+print("success")
+embed()
+
+while True:
+ igbhvr_act_inst.simulator.step()
+ success, sorted_conditions = igbhvr_act_inst.check_success()
+ print("TASK SUCCESS:", success)
+ if not success:
+ print("FAILED CONDITIONS:", sorted_conditions["unsatisfied"])
+ else:
+ pass
diff --git a/igibson/challenge/behavior_challenge.py b/igibson/challenge/behavior_challenge.py
new file mode 100644
index 000000000..4066d662a
--- /dev/null
+++ b/igibson/challenge/behavior_challenge.py
@@ -0,0 +1,189 @@
+import json
+import logging
+import os
+import shutil
+from collections import defaultdict
+
+import bddl
+import numpy as np
+
+import igibson
+from igibson.envs.behavior_env import BehaviorEnv
+from igibson.metrics.agent import BehaviorRobotMetric
+from igibson.metrics.disarrangement import KinematicDisarrangement, LogicalDisarrangement
+from igibson.metrics.task import TaskMetric
+from igibson.utils.utils import parse_config
+
+logging.getLogger().setLevel(logging.WARNING)
+
+
+def get_metrics_callbacks():
+ metrics = [
+ KinematicDisarrangement(),
+ LogicalDisarrangement(),
+ BehaviorRobotMetric(),
+ TaskMetric(),
+ ]
+
+ return (
+ [metric.start_callback for metric in metrics],
+ [metric.step_callback for metric in metrics],
+ [metric.end_callback for metric in metrics],
+ [metric.gather_results for metric in metrics],
+ )
+
+
+class BehaviorChallenge(object):
+ def __init__(self):
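+ # Evaluation setup (config file, data split, output directory) is provided through environment variables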
+ self.config_file = os.environ["CONFIG_FILE"]
+ self.split = os.environ["SPLIT"]
+ self.output_dir = os.environ["OUTPUT_DIR"]
+
+ def submit(self, agent):
+ env_config = parse_config(self.config_file)
+
+ if self.split == "minival":
+ # Only eval one activity in the config file
+ tasks = [env_config["task"]]
+ else:
+ # Eval all 100 activities
+ tasks = sorted(
+ [
+ item
+ for item in os.listdir(os.path.join(os.path.dirname(bddl.__file__), "activity_conditions"))
+ if item != "domain_igibson.bddl"
+ ]
+ )
+ assert len(tasks) == 100
+
+ log_path = os.path.join(self.output_dir, "per_episode_metrics.json")
+ summary_log_path = os.path.join(self.output_dir, "aggregated_metrics.json")
+
+ self_reported_log_path = os.path.join(
+ self.output_dir, "..", "participant_reported_results", "per_episode_metrics.json"
+ )
+ self_reported_summary_log_path = os.path.join(
+ self.output_dir, "..", "participant_reported_results", "aggregated_metrics.json"
+ )
+ if os.path.exists(self_reported_log_path):
+ shutil.copyfile(self_reported_log_path, log_path)
+ print("Per episode eval results copied from self-reported results %s" % log_path)
+ with open(self_reported_log_path) as f:
+ self_reported_log = json.load(f)
+ assert len(self_reported_log) == len(tasks) * 9
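+ # 9 evaluation instances per activity (3 scenes x 3 instances, 2 scenes x 4/5, or 1 scene x 9; see scene_instance_ids below)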
+
+ if os.path.exists(self_reported_summary_log_path):
+ shutil.copyfile(self_reported_summary_log_path, summary_log_path)
+ print("Aggregated eval results copied from self-reported results %s" % summary_log_path)
+ return
+
+ episode = 0
+ per_episode_metrics = {}
+ scene_json = os.path.join(os.path.dirname(bddl.__file__), "../utils", "activity_to_preselected_scenes.json")
+
+ with open(scene_json) as f:
+ activity_to_scenes = json.load(f)
+
+ with open(os.path.join(igibson.ig_dataset_path, "metadata", "behavior_activity_statistics.json")) as f:
+ activity_metadata = json.load(f)
+
+ for task in tasks:
+ assert task in activity_to_scenes
+ scenes = sorted(set(activity_to_scenes[task]))
+ num_scenes = len(scenes)
+ human_demo_mean_step = activity_metadata[task]["mean"]
+ env_config["max_step"] = human_demo_mean_step * 2 # adjust env_config['max_step'] based on the human
+ # demonstration, we give agent 2x steps of average human demonstration across all possible scenes
+
+ assert num_scenes <= 3
+
+ # Evaluate 9 activity instances in the training set for now
+ if num_scenes == 3:
+ scene_instance_ids = {scenes[0]: range(3), scenes[1]: range(3), scenes[2]: range(3)}
+ elif num_scenes == 2:
+ scene_instance_ids = {scenes[0]: range(4), scenes[1]: range(5)}
+ else:
+ scene_instance_ids = {scenes[0]: range(9)}
+
+ for scene_id, instance_ids in scene_instance_ids.items():
+ env_config["scene_id"] = scene_id
+ for instance_id in instance_ids:
+ env = BehaviorEnv(
+ config_file=env_config,
+ mode="all",
+ action_timestep=1.0 / 30.0,
+ physics_timestep=1.0 / 120.0,
+ instance_id=instance_id,
+ )
+ start_callbacks, step_callbacks, end_callbacks, data_callbacks = get_metrics_callbacks()
+ for callback in start_callbacks:
+ callback(env.task, None)
+ agent.reset()
+ state = env.reset()
+ while True:
+ action = agent.act(state)
+ state, reward, done, info = env.step(action)
+ for callback in step_callbacks:
+ callback(env.task, None)
+ if done:
+ break
+ for callback in end_callbacks:
+ callback(env.task, None)
+ metrics_summary = {}
+ for callback in data_callbacks:
+ metrics_summary.update(callback())
+
+ metrics_summary["task"] = task
+ per_episode_metrics[episode] = metrics_summary
+ episode += 1
+ env.close()
+
+ with open(log_path, "w+") as f:
+ json.dump(per_episode_metrics, f)
+ print("Per episode eval results saved to %s" % log_path)
+
+ aggregated_metrics = {}
+ success_score = []
+ simulator_time = []
+ kinematic_disarrangement = []
+ logical_disarrangement = []
+ distance_navigated = []
+ displacement_of_hands = []
+
+ task_to_mean_success_score = defaultdict(list)
+ task_scores = []
+
+ for episode, metric in per_episode_metrics.items():
+ task_to_mean_success_score[metric["task"]].append(metric["q_score"]["timestep"][-1])
+
+ for task, scores in task_to_mean_success_score.items():
+ task_scores.append(np.mean(scores))
+
+ task_scores = sorted(task_scores, reverse=True)
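+ # Per-task mean success scores sorted descending; the top 5 feed the 'Success Score Top 5' metric below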
+
+ for episode, metric in per_episode_metrics.items():
+ success_score.append(metric["q_score"]["timestep"][-1])
+ simulator_time.append(metric["time"]["simulator_time"])
+ kinematic_disarrangement.append(metric["kinematic_disarrangement"]["relative"])
+ logical_disarrangement.append(metric["logical_disarrangement"]["relative"])
+ distance_navigated.append(np.sum(metric["agent_distance"]["timestep"]["body"]))
+ displacement_of_hands.append(
+ np.sum(metric["grasp_distance"]["timestep"]["left_hand"])
+ + np.sum(metric["grasp_distance"]["timestep"]["right_hand"])
+ )
+
+ aggregated_metrics["Success Score"] = np.mean(success_score)
+ aggregated_metrics["Success Score Top 5"] = np.mean(np.array(task_scores)[:5])
+ aggregated_metrics["Simulated Time"] = np.mean(simulator_time)
+ aggregated_metrics["Kinematic Disarrangement"] = np.mean(kinematic_disarrangement)
+ aggregated_metrics["Logical Disarrangement"] = np.mean(logical_disarrangement)
+ aggregated_metrics["Distance Navigated"] = np.mean(distance_navigated)
+ aggregated_metrics["Displacement of Hands"] = np.mean(displacement_of_hands)
+ with open(summary_log_path, "w+") as f:
+ json.dump(aggregated_metrics, f)
+ print("Aggregated eval results saved to %s" % summary_log_path)
+
+
+if __name__ == "__main__":
+ challenge = BehaviorChallenge()
+ challenge.submit(None)
diff --git a/igibson/configs/behavior_full_observability_fetch.yaml b/igibson/configs/behavior_full_observability_fetch.yaml
deleted file mode 100644
index 763a01242..000000000
--- a/igibson/configs/behavior_full_observability_fetch.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-# scene
-scene: igibson
-scene_id: Beechwood_1_int
-clutter: false
-build_graph: true
-load_texture: true
-pybullet_load_texture: true
-should_open_all_doors: false
-
-# domain randomization
-texture_randomization_freq: null
-object_randomization_freq: null
-
-# robot
-robot:
- name: Fetch
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- assisted_grasping_mode: strict
- rigid_trunk: false
- default_trunk_offset: 0.365
- default_arm_pose: diagonal30
- controller_config:
- base:
- name: DifferentialDriveController
- arm:
- name: InverseKinematicsController
- kv: 2.0
- gripper:
- name: ParallelJawGripperController
- mode: binary
- camera:
- name: JointController
- use_delta_commands: False
-
-# task
-task: cleaning_cupboards
-task_id: 0
-instance_id: 0
-online_sampling: false
-
-# discount factor
-discount_factor: 0.99
-
-# termination condition
-max_step: 500
-
-# sensor spec
-output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs]
-# image
-fisheye: false
-image_width: 128
-image_height: 128
-vertical_fov: 120
-# depth
-depth_low: 0.0
-depth_high: 10.0
-
-# sensor noise
-depth_noise_rate: 0.0
-scan_noise_rate: 0.0
diff --git a/igibson/configs/behavior_onboard_sensing_fetch.yaml b/igibson/configs/behavior_onboard_sensing_fetch.yaml
deleted file mode 100644
index d30be756f..000000000
--- a/igibson/configs/behavior_onboard_sensing_fetch.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-# scene
-scene: igibson
-scene_id: Beechwood_1_int
-clutter: false
-build_graph: true
-load_texture: true
-pybullet_load_texture: true
-should_open_all_doors: false
-
-# domain randomization
-texture_randomization_freq: null
-object_randomization_freq: null
-
-# robot
-robot:
- name: Fetch
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- assisted_grasping_mode: null
- rigid_trunk: false
- default_trunk_offset: 0.365
- default_arm_pose: diagonal30
- controller_config:
- base:
- name: DifferentialDriveController
- arm:
- name: InverseKinematicsController
- kv: 2.0
- gripper:
- name: ParallelJawGripperController
- mode: binary
- camera:
- name: JointController
- use_delta_commands: False
-
-# task
-task: cleaning_cupboards
-task_id: 0
-instance_id: 0
-online_sampling: false
-
-# discount factor
-discount_factor: 0.99
-
-# termination condition
-max_step: 500
-
-# sensor spec
-output: [proprioception, rgb, highlight, depth, seg, ins_seg]
-# image
-fisheye: false
-image_width: 128
-image_height: 128
-vertical_fov: 120
-# depth
-depth_low: 0.0
-depth_high: 10.0
-
-# sensor noise
-depth_noise_rate: 0.0
-scan_noise_rate: 0.0
diff --git a/igibson/configs/behavior_vr.yaml b/igibson/configs/behavior_vr.yaml
deleted file mode 100644
index 61ae59f1f..000000000
--- a/igibson/configs/behavior_vr.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-# scene
-scene: igibson
-scene_id: Beechwood_1_int
-clutter: false
-build_graph: true
-load_texture: true
-pybullet_load_texture: true
-should_open_all_doors: false
-
-# domain randomization
-texture_randomization_freq: null
-object_randomization_freq: null
-
-# robot
-robot:
- name: BehaviorRobot
-
-# task
-task: cleaning_cupboards
-task_id: 0
-instance_id: 0
-online_sampling: false
-should_activate_behavior_robot: false
-should_highlight_task_relevant_objs: false
-
-# discount factor
-discount_factor: 0.99
-
-# termination condition
-max_step: 500
-
-# sensor spec
-output: [proprioception]
-# image
-fisheye: false
-image_width: 128
-image_height: 128
-vertical_fov: 120
-# depth
-depth_low: 0.0
-depth_high: 10.0
-
-# sensor noise
-depth_noise_rate: 0.0
-scan_noise_rate: 0.0
diff --git a/igibson/configs/controllers/dd.yaml b/igibson/configs/controllers/dd.yaml
deleted file mode 100644
index a109e7944..000000000
--- a/igibson/configs/controllers/dd.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# Example Differential Drive control config (shown for base control)
-# See igibson/controllers/dd_controller for docstring of arguments
-# Arguments below are the arguments that should be specified by external user (other kwargs
-# used in constructor are generated automatically at runtime)
-robot:
- controller_config:
- base:
- name: DifferentialDriveController
- command_input_limits: default
- command_output_limits: default
\ No newline at end of file
diff --git a/igibson/configs/controllers/ik.yaml b/igibson/configs/controllers/ik.yaml
deleted file mode 100644
index 0db6cb0bf..000000000
--- a/igibson/configs/controllers/ik.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Example IK config (shown for arm control)
-# See igibson/controllers/ik_controller for docstring of arguments
-# Arguments below are the arguments that should be specified by external user (other kwargs
-# used in constructor are generated automatically at runtime)
-robot:
- controller_config:
- arm:
- name: InverseKinematicsController
- command_input_limits: default
- command_output_limits:
- - [-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]
- - [0.2, 0.2, 0.2, 0.5, 0.5, 0.5]
- kv: 2.0
- mode: pose_delta_ori
- smoothing_filter_size: 2
- workspace_pose_limiter: null
- joint_range_tolerance: 0.01
\ No newline at end of file
diff --git a/igibson/configs/controllers/joint.yaml b/igibson/configs/controllers/joint.yaml
deleted file mode 100644
index a967a4c88..000000000
--- a/igibson/configs/controllers/joint.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# Example Joint control config (shown for arm control)
-# See igibson/controllers/joint_controller for docstring of arguments
-# Arguments below are the arguments that should be specified by external user (other kwargs
-# used in constructor are generated automatically at runtime)
-robot:
- controller_config:
- arm:
- name: JointController
- motor_type: velocity
- command_input_limits: default
- command_output_limits: default
- use_delta_commands: false
- use_compliant_mode: true
\ No newline at end of file
diff --git a/igibson/configs/controllers/null_gripper.yaml b/igibson/configs/controllers/null_gripper.yaml
deleted file mode 100644
index 845df9023..000000000
--- a/igibson/configs/controllers/null_gripper.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-# Example Null Gripper control config (shown for gripper control)
-# See igibson/controllers/null_gripper_controller for docstring of arguments
-# Arguments below are the arguments that should be specified by external user (other kwargs
-# used in constructor are generated automatically at runtime)
-robot:
- controller_config:
- gripper:
- name: NullGripperController
- # no other args to specify (this is a dummy controller)
\ No newline at end of file
diff --git a/igibson/configs/controllers/parallel_jaw_gripper.yaml b/igibson/configs/controllers/parallel_jaw_gripper.yaml
deleted file mode 100644
index 12f9282a2..000000000
--- a/igibson/configs/controllers/parallel_jaw_gripper.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# Example Parallel Jaw Gripper control config (shown for arm control)
-# See igibson/controllers/parallel_jaw_gripper_controller for docstring of arguments
-# Arguments below are the arguments that should be specified by external user (other kwargs
-# used in constructor are generated automatically at runtime)
-robot:
- controller_config:
- arm:
- name: ParallelJawGripperController
- motor_type: position
- command_input_limits: default
- command_output_limits: default
- mode: binary
- limit_tolerance: 0.01
\ No newline at end of file
diff --git a/igibson/configs/fetch_reaching.yaml b/igibson/configs/fetch_reaching.yaml
deleted file mode 100644
index a73f0ca36..000000000
--- a/igibson/configs/fetch_reaching.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-# scene
-scene: igibson
-scene_id: Rs_int
-build_graph: true
-load_texture: true
-pybullet_load_texture: true
-trav_map_type: no_obj
-trav_map_resolution: 0.1
-trav_map_erosion: 3
-should_open_all_doors: true
-
-# domain randomization
-texture_randomization_freq: null
-object_randomization_freq: null
-
-# robot
-robot:
- name: Fetch
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- assisted_grasping_mode: null
- rigid_trunk: false
- default_trunk_offset: 0.365
- default_arm_pose: diagonal30
- controller_config:
- base:
- name: DifferentialDriveController
- arm:
- name: InverseKinematicsController
- kv: 2.0
- gripper:
- name: ParallelJawGripperController
- mode: binary
- camera:
- name: JointController
- use_delta_commands: False
-
-# task
-task: reaching_random
-target_dist_min: 1.0
-target_dist_max: 10.0
-goal_format: polar
-task_obs_dim: 4
-
-# reward
-reward_type: l2
-success_reward: 10.0
-potential_reward_weight: 1.0
-collision_reward_weight: -0.1
-
-# discount factor
-discount_factor: 0.99
-
-# termination condition
-dist_tol: 0.36 # body width
-max_step: 500
-max_collisions_allowed: 500
-
-# misc config
-initial_pos_z_offset: 0.1
-collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
-
-# sensor spec
-output: [task_obs, rgb, depth, scan]
-# image
-# Primesense Carmine 1.09 short-range RGBD sensor
-# http://xtionprolive.com/primesense-carmine-1.09
-fisheye: false
-image_width: 128
-image_height: 128
-vertical_fov: 90
-# depth
-depth_low: 0.35
-depth_high: 3.0
-# scan
-# SICK TIM571 scanning range finder
-# https://docs.fetchrobotics.com/robot_hardware.html
-# n_horizontal_rays is originally 661, sub-sampled 1/3
-n_horizontal_rays: 220
-n_vertical_beams: 1
-laser_linear_range: 25.0
-laser_angular_range: 220.0
-min_laser_dist: 0.05
-laser_link_name: laser_link
-
-# sensor noise
-depth_noise_rate: 0.0
-scan_noise_rate: 0.0
-
-# visual objects
-visible_target: true
-visible_path: false
-
diff --git a/igibson/configs/robots/ant.yaml b/igibson/configs/robots/ant.yaml
deleted file mode 100644
index a95021661..000000000
--- a/igibson/configs/robots/ant.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-# Example Ant config
-robot:
- name: Ant
- action_type: continuous
- action_normalize: true
- proprio_obs:
- - joint_qpos
- base_name: null
- scale: 1.0
- self_collision: false
- rendering_params: null
- controller_config:
- base:
- name: JointController
\ No newline at end of file
diff --git a/igibson/configs/robots/fetch.yaml b/igibson/configs/robots/fetch.yaml
deleted file mode 100644
index 70002a46e..000000000
--- a/igibson/configs/robots/fetch.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Example Fetch config
-robot:
- name: Fetch
- action_type: continuous
- action_normalize: true
- proprio_obs:
- - eef_pos
- - eef_quat
- - trunk_qpos
- - arm_qpos_sin
- - arm_qpos_cos
- - gripper_qpos
- - grasp
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- assisted_grasping_mode: null
- rigid_trunk: false
- default_trunk_offset: 0.365
- default_arm_pose: vertical
- controller_config:
- base:
- name: DifferentialDriveController
- arm:
- name: InverseKinematicsController
- gripper:
- name: ParallelJawGripperController
- camera:
- name: JointController
\ No newline at end of file
diff --git a/igibson/configs/robots/freight.yaml b/igibson/configs/robots/freight.yaml
deleted file mode 100644
index c264fc3ac..000000000
--- a/igibson/configs/robots/freight.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-# Example Freight config
-robot:
- name: Freight
- action_type: continuous
- action_normalize: true
- proprio_obs:
- - dd_base_lin_vel
- - dd_base_ang_vel
- base_name: null
- scale: 1.0
- self_collision: false
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
\ No newline at end of file
diff --git a/igibson/configs/robots/husky.yaml b/igibson/configs/robots/husky.yaml
deleted file mode 100644
index 77b77201b..000000000
--- a/igibson/configs/robots/husky.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-# Example Husky config
-robot:
- name: Husky
- action_type: continuous
- action_normalize: true
- proprio_obs:
- - joint_qpos
- - joint_qvel
- base_name: null
- scale: 1.0
- self_collision: false
- rendering_params: null
- controller_config:
- base:
- name: JointController
\ No newline at end of file
diff --git a/igibson/configs/robots/jr2.yaml b/igibson/configs/robots/jr2.yaml
deleted file mode 100644
index 2fa63e554..000000000
--- a/igibson/configs/robots/jr2.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-# Example JR2 config
-robot:
- name: JR2
- action_type: continuous
- action_normalize: true
- proprio_obs:
- - eef_pos
- - eef_quat
- - arm_qpos_sin
- - arm_qpos_cos
- base_name: null
- scale: 1.0
- self_collision: false
- rendering_params: null
- assisted_grasping_mode: null
- controller_config:
- base:
- name: DifferentialDriveController
- arm:
- name: InverseKinematicsController
- gripper:
- name: NullGripperController
\ No newline at end of file
diff --git a/igibson/configs/robots/locobot.yaml b/igibson/configs/robots/locobot.yaml
deleted file mode 100644
index 58806ffd9..000000000
--- a/igibson/configs/robots/locobot.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-# Example Locobot config
-robot:
- name: Locobot
- action_type: continuous
- action_normalize: true
- proprio_obs:
- - dd_base_lin_vel
- - dd_base_ang_vel
- base_name: null
- scale: 1.0
- self_collision: false
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
\ No newline at end of file
diff --git a/igibson/configs/robots/turtlebot.yaml b/igibson/configs/robots/turtlebot.yaml
deleted file mode 100644
index 03394462b..000000000
--- a/igibson/configs/robots/turtlebot.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-# Example Turtlebot config
-robot:
- name: Turtlebot
- action_type: continuous
- action_normalize: true
- proprio_obs:
- - dd_base_lin_vel
- - dd_base_ang_vel
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
\ No newline at end of file
diff --git a/igibson/controllers/__init__.py b/igibson/controllers/__init__.py
deleted file mode 100644
index 4367295c5..000000000
--- a/igibson/controllers/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from igibson.controllers.controller_base import (
- REGISTERED_CONTROLLERS,
- REGISTERED_LOCOMOTION_CONTROLLERS,
- REGISTERED_MANIPULATION_CONTROLLERS,
- ControlType,
- LocomotionController,
- ManipulationController,
-)
-from igibson.controllers.dd_controller import DifferentialDriveController
-from igibson.controllers.ik_controller import InverseKinematicsController
-from igibson.controllers.joint_controller import JointController
-from igibson.controllers.null_gripper_controller import NullGripperController
-from igibson.controllers.parallel_jaw_gripper_controller import ParallelJawGripperController
-from igibson.utils.python_utils import assert_valid_key, extract_class_init_kwargs_from_dict
-
-
-def create_controller(name, **kwargs):
- """
- Creates a controller of type @name with corresponding necessary keyword arguments @kwargs
-
- :param name: str, type of controller to use (e.g. JointController, InverseKinematicsController, etc.)
- :param kwargs: Any relevant keyword arguments to pass to the controller
-
- :return Controller: created controller
- """
- assert_valid_key(key=name, valid_keys=REGISTERED_CONTROLLERS, name="controller")
- controller_cls = REGISTERED_CONTROLLERS[name]
-
- return controller_cls(**kwargs)
diff --git a/igibson/controllers/controller_base.py b/igibson/controllers/controller_base.py
deleted file mode 100644
index 45e695c57..000000000
--- a/igibson/controllers/controller_base.py
+++ /dev/null
@@ -1,314 +0,0 @@
-from collections import Iterable
-
-import numpy as np
-
-from igibson.utils.python_utils import assert_valid_key
-
-# Global dicts that will contain mappings
-REGISTERED_CONTROLLERS = {}
-REGISTERED_LOCOMOTION_CONTROLLERS = {}
-REGISTERED_MANIPULATION_CONTROLLERS = {}
-
-
-def register_controller(cls):
- if cls.__name__ not in REGISTERED_CONTROLLERS:
- REGISTERED_CONTROLLERS[cls.__name__] = cls
-
-
-def register_locomotion_controller(cls):
- if cls.__name__ not in REGISTERED_LOCOMOTION_CONTROLLERS:
- REGISTERED_LOCOMOTION_CONTROLLERS[cls.__name__] = cls
-
-
-def register_manipulation_controller(cls):
- if cls.__name__ not in REGISTERED_MANIPULATION_CONTROLLERS:
- REGISTERED_MANIPULATION_CONTROLLERS[cls.__name__] = cls
-
-
-# Define macros
-class ControlType:
- POSITION = 0
- VELOCITY = 1
- TORQUE = 2
- _MAPPING = {
- "position": POSITION,
- "velocity": VELOCITY,
- "torque": TORQUE,
- }
- VALID_TYPES = set(_MAPPING.values())
- VALID_TYPES_STR = set(_MAPPING.keys())
-
- @classmethod
- def get_type(cls, type_str):
- """
- :param type_str: One of "position", "velocity", or "torque" (any case), and maps it
- to the corresponding type
-
- :return ControlType: control type corresponding to the associated string
- """
- assert_valid_key(key=type_str.lower(), valid_keys=cls._MAPPING, name="control type")
- return cls._MAPPING[type_str.lower()]
-
-
-class BaseController:
- """
- An abstract class with interface for mapping specific types of commands to deployable control signals.
- """
-
- def __init_subclass__(cls, **kwargs):
- """
- Registers all subclasses as part of this registry. This is useful to decouple internal codebase from external
- user additions. This way, users can add their custom controller by simply extending this Controller class,
- and it will automatically be registered internally. This allows users to then specify their controller
- directly in string-from in e.g., their config files, without having to manually set the str-to-class mapping
- in our code.
- """
- register_controller(cls)
-
- def __init__(
- self,
- control_freq,
- control_limits,
- joint_idx,
- command_input_limits="default",
- command_output_limits="default",
- ):
- """
- :param control_freq: int, controller loop frequency
- :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted
- control signal. Should specify per-actuator type limits, i.e.:
-
- "position": [[min], [max]]
- "velocity": [[min], [max]]
- "torque": [[min], [max]]
- "has_limit": [...bool...]
-
- Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
- :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring
- controller-relevant values during control computations
- :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]],
- if set, is the min/max acceptable inputted command. Values outside of this range will be clipped.
- If None, no clipping will be used. If "default", range will be set to (-1, 1)
- :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set,
- is the min/max scaled command. If both this value and @command_input_limits is not None,
- then all inputted command values will be scaled from the input range to the output range.
- If either is None, no scaling will be used. If "default", then this range will automatically be set
- to the @control_limits entry corresponding to self.control_type
- """
- # Store arguments
- self.control_freq = control_freq
- self.control_limits = {}
- for motor_type in {"position", "velocity", "torque"}:
- assert (
- motor_type in control_limits
- ), "Expected motor_type {} to be in control_limits, but does not exist.".format(motor_type)
- self.control_limits[ControlType.get_type(motor_type)] = [
- np.array(control_limits[motor_type][0]),
- np.array(control_limits[motor_type][1]),
- ]
- assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist."
- self.joint_has_limits = control_limits["has_limit"]
- self._joint_idx = joint_idx
-
- # Initialize some other variables that will be filled in during runtime
- self.control = None
- self._command = None
- self._command_scale_factor = None
- self._command_output_transform = None
- self._command_input_transform = None
-
- # Standardize command input / output limits to be (min_array, max_array)
- command_input_limits = (-1.0, 1.0) if command_input_limits == "default" else command_input_limits
- command_output_limits = (
- (
- np.array(self.control_limits[self.control_type][0])[self.joint_idx],
- np.array(self.control_limits[self.control_type][1])[self.joint_idx],
- )
- if command_output_limits == "default"
- else command_output_limits
- )
- self.command_input_limits = (
- None
- if command_input_limits is None
- else (
- self.nums2array(command_input_limits[0], self.command_dim),
- self.nums2array(command_input_limits[1], self.command_dim),
- )
- )
- self.command_output_limits = (
- None
- if command_output_limits is None
- else (
- self.nums2array(command_output_limits[0], self.command_dim),
- self.nums2array(command_output_limits[1], self.command_dim),
- )
- )
-
- def _preprocess_command(self, command):
- """
- Clips + scales inputted @command according to self.command_input_limits and self.command_output_limits.
- If self.command_input_limits is None, then no clipping will occur. If either self.command_input_limits
- or self.command_output_limits is None, then no scaling will occur.
-
- :param command: Array[float] or float, Inputted command vector
- :return Array[float]: Processed command vector
- """
- # Make sure command is a np.array
- command = np.array([command]) if type(command) in {int, float} else np.array(command)
- # We only clip and / or scale if self.command_input_limits exists
- if self.command_input_limits is not None:
- # Clip
- command = command.clip(*self.command_input_limits)
- if self.command_output_limits is not None:
- # If we haven't calculated how to scale the command, do that now (once)
- if self._command_scale_factor is None:
- self._command_scale_factor = abs(
- self.command_output_limits[1] - self.command_output_limits[0]
- ) / abs(self.command_input_limits[1] - self.command_input_limits[0])
- self._command_output_transform = (
- self.command_output_limits[1] + self.command_output_limits[0]
- ) / 2.0
- self._command_input_transform = (self.command_input_limits[1] + self.command_input_limits[0]) / 2.0
- # Scale command
- command = (
- command - self._command_input_transform
- ) * self._command_scale_factor + self._command_output_transform
-
- # Return processed command
- return command
-
- def update_command(self, command):
- """
- Updates inputted @command internally.
-
- :param command: Array[float], inputted command to store internally in this controller
- """
- # Sanity check the command
- assert len(command) == self.command_dim, "Commands must be dimension {}, got dim {} instead.".format(
- self.command_dim, len(command)
- )
- # Preprocess and store inputted command
- self._command = self._preprocess_command(np.array(command))
-
- def clip_control(self, control):
- """
- Clips the inputted @control signal based on @control_limits.
-
- :param control: Array[float], control signal to clip
-
- :return Array[float]: Clipped control signal
- """
- clipped_control = control.clip(
- self.control_limits[self.control_type][0][self.joint_idx],
- self.control_limits[self.control_type][1][self.joint_idx],
- )
- idx = (
- self.joint_has_limits[self.joint_idx]
- if self.control_type == ControlType.POSITION
- else [True] * self.control_dim
- )
- control[idx] = clipped_control[idx]
- return control
-
- def step(self, control_dict):
- """
- Take a controller step.
-
- :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped
- states necessary for controller computation
-
- :return Array[float]: numpy array of outputted control signals
- """
- control = self._command_to_control(command=self._command, control_dict=control_dict)
- self.control = self.clip_control(control=control)
- return self.control
-
- def reset(self):
- """
- Resets this controller. Should be implemented by subclass.
- """
- raise NotImplementedError
-
- def _command_to_control(self, command, control_dict):
- """
- Converts the (already preprocessed) inputted @command into deployable (non-clipped!) control signal.
- Should be implemented by subclass.
-
- :param command: Array[float], desired (already preprocessed) command to convert into control signals
- :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped
- states necessary for controller computation
-
- :return: Array[float], outputted (non-clipped!) control signal to deploy
- """
- raise NotImplementedError
-
- @staticmethod
- def nums2array(nums, dim):
- """
- Convert input @nums into numpy array of length @dim. If @nums is a single number, broadcasts it to the
- corresponding dimension size @dim before converting into a numpy array
- Args:
- nums (numeric or Iterable): Either single value or array of numbers
- dim (int): Size of array to broadcast input to env.sim.data.actuator_force
- Returns:
- np.array: Array filled with values specified in @nums
- """
- # First run sanity check to make sure no strings are being inputted
- if isinstance(nums, str):
- raise TypeError("Error: Only numeric inputs are supported for this function, nums2array!")
-
- # Check if input is an Iterable, if so, we simply convert the input to np.array and return
- # Else, input is a single value, so we map to a numpy array of correct size and return
- return np.array(nums) if isinstance(nums, Iterable) else np.ones(dim) * nums
-
- @property
- def control_type(self):
- """
- :return ControlType: Type of control returned by this controller
- """
- raise NotImplementedError
-
- @property
- def command_dim(self):
- """
- :return int: Expected size of inputted commands
- """
- raise NotImplementedError
-
- @property
- def control_dim(self):
- """
- :return int: Expected size of outputted controls
- """
- return len(self.joint_idx)
-
- @property
- def joint_idx(self):
- """
- :return Array[int]: Joint indices corresponding to the specific joints being controlled by this robot
- """
- return np.array(self._joint_idx)
-
-
-class LocomotionController(BaseController):
- """
- Controller to control locomotion. All implemented controllers that encompass locomotion capabilities should extend
- from this class.
- """
-
- def __init_subclass__(cls, **kwargs):
- # Register as part of locomotion controllers
- super().__init_subclass__(**kwargs)
- register_locomotion_controller(cls)
-
-
-class ManipulationController(BaseController):
- """
- Controller to control manipulation. All implemented controllers that encompass manipulation capabilities
- should extend from this class.
- """
-
- def __init_subclass__(cls, **kwargs):
- # Register as part of locomotion controllers
- super().__init_subclass__(**kwargs)
- register_manipulation_controller(cls)
diff --git a/igibson/controllers/dd_controller.py b/igibson/controllers/dd_controller.py
deleted file mode 100644
index 5e0436f82..000000000
--- a/igibson/controllers/dd_controller.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import numpy as np
-
-from igibson.controllers import ControlType, LocomotionController
-
-
-class DifferentialDriveController(LocomotionController):
- """
- Differential drive (DD) controller for controlling two independently controlled wheeled joints.
-
- Each controller step consists of the following:
- 1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
- 2. Convert desired (lin_vel, ang_vel) command into (left, right) wheel joint velocity control signals
- 3. Clips the resulting command by the joint velocity limits
- """
-
- def __init__(
- self,
- wheel_radius,
- wheel_axle_length,
- control_freq,
- control_limits,
- joint_idx,
- command_input_limits="default",
- command_output_limits="default",
- ):
- """
- :param wheel_radius: float, radius of the wheels (both assumed to be same radius)
- :param wheel_axle_length: float, perpendicular distance between the two wheels
- :param control_freq: int, controller loop frequency
- :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted
- control signal. Should specify per-actuator type limits, i.e.:
-
- "position": [[min], [max]]
- "velocity": [[min], [max]]
- "torque": [[min], [max]]
- "has_limit": [...bool...]
-
- Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
- Assumes order is [left_wheel, right_wheel] for each set of values.
- :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring
- controller-relevant values during control computations
- :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]],
- if set, is the min/max acceptable inputted command. Values outside of this range will be clipped.
- If None, no clipping will be used. If "default", range will be set to (-1, 1)
- :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set,
- is the min/max scaled command. If both this value and @command_input_limits is not None,
- then all inputted command values will be scaled from the input range to the output range.
- If either is None, no scaling will be used. If "default", then this range will automatically be set
- to the maximum linear and angular velocities calculated from @wheel_radius, @wheel_axle_length, and
- @control_limits velocity limits entry
- """
- # Store internal variables
- self.wheel_radius = wheel_radius
- self.wheel_axle_halflength = wheel_axle_length / 2.0
-
- # If we're using default command output limits, map this to maximum linear / angular velocities
- if command_output_limits == "default":
- min_vels = control_limits["velocity"][0][joint_idx]
- assert (
- min_vels[0] == min_vels[1]
- ), "Differential drive requires both wheel joints to have same min velocities!"
- max_vels = control_limits["velocity"][1][joint_idx]
- assert (
- max_vels[0] == max_vels[1]
- ), "Differential drive requires both wheel joints to have same max velocities!"
- assert abs(min_vels[0]) == abs(
- max_vels[0]
- ), "Differential drive requires both wheel joints to have same min and max absolute velocities!"
- max_lin_vel = max_vels[0] * wheel_radius
- max_ang_vel = max_lin_vel * 2.0 / wheel_axle_length
- command_output_limits = ((-max_lin_vel, -max_ang_vel), (max_lin_vel, max_ang_vel))
-
- # Run super init
- super().__init__(
- control_freq=control_freq,
- control_limits=control_limits,
- joint_idx=joint_idx,
- command_input_limits=command_input_limits,
- command_output_limits=command_output_limits,
- )
-
- def reset(self):
- # No-op
- pass
-
- def _command_to_control(self, command, control_dict):
- """
- Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal.
- This processes converts the desired (lin_vel, ang_vel) command into (left, right) wheel joint velocity control
- signals.
-
- :param command: Array[float], desired (already preprocessed) 2D command to convert into control signals
- Consists of desired (lin_vel, ang_vel) of the controlled body
- :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped
- states necessary for controller computation. Must include the following keys:
-
- :return: Array[float], outputted (non-clipped!) velocity control signal to deploy
- to the [left, right] wheel joints
- """
- lin_vel, ang_vel = command
-
- # Convert to wheel velocities
- left_wheel_joint_vel = (lin_vel - ang_vel * self.wheel_axle_halflength) / self.wheel_radius
- right_wheel_joint_vel = (lin_vel + ang_vel * self.wheel_axle_halflength) / self.wheel_radius
-
- # Return desired velocities
- return np.array([left_wheel_joint_vel, right_wheel_joint_vel])
-
- @property
- def control_type(self):
- return ControlType.VELOCITY
-
- @property
- def command_dim(self):
- # [lin_vel, ang_vel]
- return 2
diff --git a/igibson/controllers/ik_controller.py b/igibson/controllers/ik_controller.py
index c244d2d0c..0d48463f1 100644
--- a/igibson/controllers/ik_controller.py
+++ b/igibson/controllers/ik_controller.py
@@ -2,221 +2,167 @@
import pybullet as p
import igibson.utils.transform_utils as T
-from igibson.controllers import ControlType, ManipulationController
from igibson.utils.filters import MovingAverageFilter
# Different modes
-IK_MODE_COMMAND_DIMS = {
- "pose_absolute_ori": 6, # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates
- "pose_delta_ori": 6, # 6DOF (dx,dy,dz,dax,day,daz) control over pose
- "position_fixed_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands being kept as fixed initial absolute orientation
- "position_compliant_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands automatically being sent as 0s (so can drift over time)
+IK_MODES = {
+ "pose_absolute_ori", # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates
+ "pose_delta_ori", # 6DOF (dx,dy,dz,dax,day,daz) control over pose
+ "position_fixed_ori", # 3DOF (dx,dy,dz) control over position, with orientation commands being kept as fixed initial absolute orientation
+ "position_compliant_ori", # 3DOF (dx,dy,dz) control over position, with orientation commands automatically being sent as 0s (so can drift over time)
}
-IK_MODES = set(IK_MODE_COMMAND_DIMS.keys())
-class InverseKinematicsController(ManipulationController):
+class IKController:
"""
- Controller class to convert (delta) EEF commands into joint velocities using Inverse Kinematics (IK).
+ Simple controller class to convert (delta) EEF commands into joint velocities
- Each controller step consists of the following:
- 1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
- 2. Run Inverse Kinematics to back out joint velocities for a desired task frame command
- 3. Clips the resulting command by the motor (velocity) limits
+ Args:
+ robot (BaseRobot): Robot to control
+ config (dict): Config associated with this iG setup
"""
- def __init__(
- self,
- base_body_id,
- task_link_id,
- control_freq,
- default_joint_pos,
- joint_damping,
- control_limits,
- joint_idx,
- command_input_limits="default",
- command_output_limits=((-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), (0.2, 0.2, 0.2, 0.5, 0.5, 0.5)),
- kv=2.0,
- mode="pose_delta_ori",
- smoothing_filter_size=None,
- workspace_pose_limiter=None,
- joint_range_tolerance=0.01,
- ):
+ def __init__(self, robot, config):
+ # Store internal variables
+ self.robot = robot
+ self.config = config
+ self.input_max = np.array(config["controller"]["input_max"])
+ self.input_min = np.array(config["controller"]["input_min"])
+ self.output_max = np.array(config["controller"]["output_max"])
+ self.output_min = np.array(config["controller"]["output_min"])
+ self.action_scale = abs(self.output_max - self.output_min) / abs(self.input_max - self.input_min)
+ self.action_output_transform = (self.output_max + self.output_min) / 2.0
+ self.action_input_transform = (self.input_max + self.input_min) / 2.0
+ self.lpf = MovingAverageFilter(obs_dim=len(self.robot.upper_joint_limits), filter_width=2)
+
+ # Set mode and make sure it's valid
+ self.mode = config["controller"].get("mode", "pose_delta_ori")
+ assert self.mode in IK_MODES, f"Invalid IK mode specified. Valid options: {IK_MODES}. Got: {self.mode}"
+
+ # Store global limits
+ self.eef_always_in_frame = config["controller"].get("eef_always_in_frame", False)
+ self.neutral_xy = config["controller"].get("neutral_xy", [0.25, 0])
+ self.radius_limit = config["controller"].get("radius_limit", 0.5)
+ self.height_limits = config["controller"].get("height_limits", [0.2, 1.5])
+
+ # Get vertical and horizontal fov
+ self.vertical_fov = config["vertical_fov"] * np.pi / 180.0
+ width, height = config["image_width"], config["image_height"]
+ self.horizontal_fov = 2 * np.arctan(np.tan(self.vertical_fov / 2.0) * width / height)
+
+ # Create var to store orientation reference (may be necessary based on mode)
+ self.ori_target = None # quaternion
+
+ def reset(self):
"""
- :param base_body_id: int, unique pybullet ID corresponding to the pybullet body being controlled by IK
- :param task_link_id: int, pybullet link ID corresponding to the link within the body being controlled by IK
- :param control_freq: int, controller loop frequency
- :param default_joint_pos: Array[float], default joint positions, used as part of nullspace controller in IK
- :param joint_damping: Array[float], joint damping parameters associated with each joint
- in the body being controlled
- :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted
- control signal. Should specify per-actuator type limits, i.e.:
-
- "position": [[min], [max]]
- "velocity": [[min], [max]]
- "torque": [[min], [max]]
- "has_limit": [...bool...]
-
- Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
- :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring
- controller-relevant values during control computations
- :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]],
- if set, is the min/max acceptable inputted command. Values outside of this range will be clipped.
- If None, no clipping will be used. If "default", range will be set to (-1, 1)
- :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set,
- is the min/max scaled command. If both this value and @command_input_limits is not None,
- then all inputted command values will be scaled from the input range to the output range.
- If either is None, no scaling will be used. If "default", then this range will automatically be set
- to the @control_limits entry corresponding to self.control_type
- :param kv: float, Gain applied to error between IK-commanded joint positions and current joint positions
- :param mode: str, mode to use when computing IK. In all cases, position commands are 3DOF delta (dx,dy,dz)
- cartesian values, relative to the robot base frame. Valid options are:
- - "pose_absolute_ori": 6DOF (dx,dy,dz,ax,ay,az) control over pose,
- where the orientation is given in absolute axis-angle coordinates
- - "pose_delta_ori": 6DOF (dx,dy,dz,dax,day,daz) control over pose
- - "position_fixed_ori": 3DOF (dx,dy,dz) control over position,
- with orientation commands being kept as fixed initial absolute orientation
- - "position_compliant_ori": 3DOF (dx,dy,dz) control over position,
- with orientation commands automatically being sent as 0s (so can drift over time)
- :param smoothing_filter_size: None or int, if specified, sets the size of a moving average filter to apply
- on all outputted IK joint positions.
- :param workspace_pose_limiter: None or function, if specified, callback method that should clip absolute
- target (x,y,z) cartesian position and absolute quaternion orientation (x,y,z,w) to a specific workspace
- range (i.e.: this can be unique to each robot, and implemented by each embodiment).
- Function signature should be:
-
- def limiter(command_pos: Array[float], command_quat: Array[float], control_dict: Dict[str, Any]) --> Tuple[Array[float], Array[float]]
-
- where pos_command is (x,y,z) cartesian position values, command_quat is (x,y,z,w) quarternion orientation
- values, and the returned tuple is the processed (pos, quat) command.
- :param joint_range_tolerance: float, amount to add to each side of the inputted joint range, to improve IK
- convergence stability (e.g.: for joint_ranges = 0 for no limits, prevents NaNs from occurring)
+ Reset this controller
"""
- # Store arguments
- control_dim = len(joint_damping)
- self.control_filter = (
- None
- if smoothing_filter_size in {None, 0}
- else MovingAverageFilter(obs_dim=control_dim, filter_width=smoothing_filter_size)
- )
- assert mode in IK_MODES, "Invalid ik mode specified! Valid options are: {IK_MODES}, got: {mode}"
- self.mode = mode
- self.kv = kv
- self.workspace_pose_limiter = workspace_pose_limiter
- self.base_body_id = base_body_id
- self.task_link_id = task_link_id
- self.default_joint_pos = np.array(default_joint_pos)
- self.joint_damping = np.array(joint_damping)
- self.joint_range_tolerance = joint_range_tolerance
-
- # Other variables that will be filled in at runtime
- self._quat_target = None
-
- # Run super init
- super().__init__(
- control_freq=control_freq,
- control_limits=control_limits,
- joint_idx=joint_idx,
- command_input_limits=command_input_limits,
- command_output_limits=command_output_limits,
- )
+ self.lpf = MovingAverageFilter(obs_dim=len(self.robot.upper_joint_limits), filter_width=2)
+ self.ori_target = None
- def reset(self):
- # Reset the filter and clear internal control state
- if self.control_filter is not None:
- self.control_filter.reset()
- self._quat_target = None
+ def scale_command(self, command):
+ """
+ Scales the inputted action based on internal limits
+
+ Args:
+ command (6-array): Inputted raw command
- @staticmethod
- def _pose_in_base_to_pose_in_world(pose_in_base, base_in_world):
+ Returns:
+ 6-array: Scaled command
"""
- Convert a pose in the base frame to a pose in the world frame.
+ # Clip command
+ command = np.clip(command, self.input_min, self.input_max)
+ return (command - self.action_input_transform) * self.action_scale + self.action_output_transform
- :param pose_in_base: Tuple[Array[float], Array[float]], Cartesian xyz position,
- quaternion xyzw orientation tuple corresponding to the desired pose in its local base frame
- :param base_in_world: Tuple[Array[float], Array[float]], Cartesian xyz position,
- quaternion xyzw orientation tuple corresponding to the base pose in the global static frame
+ def get_current_error(self, current, set_point):
+ """
+ Returns an array of differences between the current joint positions and the desired joint positions (current - set_point).
+ Useful for PID control.
+
+ :param current: the current joint positions
+ :param set_point: the joint positions that are desired as a numpy array
+ :return: the current error in the joint positions
+ """
+ error = current - set_point
+ return error
+
+ def bullet_base_pose_to_world_pose(self, pose_in_base):
+ """
+ Convert a pose in the base frame to a pose in the world frame.
- :return Tuple[Array[float], Array[float]]: Cartesian xyz position,
- quaternion xyzw orientation tuple corresponding to the desired pose in the global static frame
+ :param pose_in_base: a (pos, orn) tuple
+ :return pose_in_world: a (pos, orn) tuple
"""
+
pose_in_base_mat = T.pose2mat(pose_in_base)
- base_pose_in_world_mat = T.pose2mat(base_in_world)
+
+ base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.robot.robot_ids[0])[0])
+ base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.robot.robot_ids[0])[1])
+ base_pose_in_world_mat = T.pose2mat((base_pos_in_world, base_orn_in_world))
+
pose_in_world_mat = T.pose_in_A_to_pose_in_B(pose_A=pose_in_base_mat, pose_A_in_B=base_pose_in_world_mat)
return T.mat2pose(pose_in_world_mat)
- def _command_to_control(self, command, control_dict):
- """
- Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal.
- This processes the command based on self.mode, possibly clips the command based on self.workspace_pose_limiter,
-
- :param command: Array[float], desired (already preprocessed) command to convert into control signals
- Is one of:
- (dx,dy,dz) - desired delta cartesian position
- (dx,dy,dz,dax,day,daz) - desired delta cartesian position and delta axis-angle orientation
- (dx,dy,dz,ax,ay,az) - desired delta cartesian position and global axis-angle orientation
- :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped
- states necessary for controller computation. Must include the following keys:
- joint_position: Array of current joint positions
- base_pos: (x,y,z) cartesian position of the robot's base relative to the static global frame
- base_quat: (x,y,z,w) quaternion orientation of the robot's base relative to the static global frame
- task_pos_relative: (x,y,z) relative cartesian position of the desired task frame to control, computed
- in its local frame (e.g.: robot base frame)
- task_quat_relative: (x,y,z,w) relative quaternion orientation of the desired task frame to control,
- computed in its local frame (e.g.: robot base frame)
-
- :return: Array[float], outputted (non-clipped!) velocity control signal to deploy
+ def joint_positions_for_delta_command(self, delta):
"""
- # Grab important info from control dict
- pos_relative = np.array(control_dict["task_pos_relative"])
- quat_relative = np.array(control_dict["task_quat_relative"])
+ This function runs inverse kinematics to back out target joint positions
+ from the inputted delta command.
- # The first three values of the command are always the (delta) position, convert to absolute values
- dpos = command[:3]
- target_pos = pos_relative + dpos
+ :param delta: a relative pose command defined by (dx, dy, dz) and optionally (dar, dap, day)
+
+ :return: A list of size @num_joints corresponding to the target joint angles.
+ """
+ # Compute position
+ dpos = delta[:3]
+ target_pos = self.robot.get_relative_eef_position() + dpos
+
+ # Clip values if they're past the limits
+ xy_vec = target_pos[:2] - self.neutral_xy # offset of the target from the "neutral" xy location
+ d = np.linalg.norm(xy_vec)
+ target_pos[:2] = (min(self.radius_limit, d) / d) * xy_vec + self.neutral_xy
+ target_pos[2] = np.clip(target_pos[2], *self.height_limits)
+
+ # Also clip the target pos if we want to keep the eef in frame
+ if self.eef_always_in_frame:
+ # Calculate angle from base to eef in xy plane
+ angle = np.arctan2(target_pos[1], target_pos[0])
+ # Clip angle appropriately
+ angle_clipped = np.clip(angle, -self.horizontal_fov / 2, self.horizontal_fov / 2)
+ # Project the original vector onto a unit vector pointing in the direction of the clipped angle
+ unit_xy_angle_clipped = np.array([np.cos(angle_clipped), np.sin(angle_clipped)])
+ target_pos[:2] = np.dot(target_pos[:2], unit_xy_angle_clipped) * unit_xy_angle_clipped
+
+ # print(f"target pos: {target_pos}")
# Compute orientation
if self.mode == "position_fixed_ori":
# We need to grab the current robot orientation as the commanded orientation if there is none saved
- if self._quat_target is None:
- self._quat_target = quat_relative
- target_quat = self._quat_target
+ if self.ori_target is None:
+ self.ori_target = self.robot.get_relative_eef_orientation()
+ target_quat = self.ori_target
elif self.mode == "position_compliant_ori":
# Target quat is simply the current robot orientation
- target_quat = quat_relative
+ target_quat = self.robot.get_relative_eef_orientation()
elif self.mode == "pose_absolute_ori":
# Received "delta" ori is in fact the desired absolute orientation
- target_quat = T.axisangle2quat(command[3:])
+ target_quat = T.axisangle2quat(delta[3:])
else: # pose_delta_ori control
# Grab dori and compute target ori
- dori = T.quat2mat(T.axisangle2quat(command[3:]))
- target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative))
-
- # Possibly limit to workspace if specified
- if self.workspace_pose_limiter is not None:
- target_pos, target_quat = self.workspace_pose_limiter(target_pos, target_quat, control_dict)
+ dori = T.quat2mat(T.axisangle2quat(delta[3:]))
+ target_quat = T.mat2quat(dori @ T.quat2mat(self.robot.get_relative_eef_orientation()))
# Convert to world frame
- target_pos, target_quat = self._pose_in_base_to_pose_in_world(
- pose_in_base=(target_pos, target_quat),
- base_in_world=(np.array(control_dict["base_pos"]), np.array(control_dict["base_quat"])),
- )
+ target_pos, target_quat = self.bullet_base_pose_to_world_pose((target_pos, target_quat))
# Calculate and return IK-backed out joint angles
- joint_targets = self._calc_joint_angles_from_ik(target_pos=target_pos, target_quat=target_quat)
-
- # Grab the resulting error and scale it by the velocity gain
- u = -self.kv * (control_dict["joint_position"] - joint_targets)
-
- # Return these commanded velocities, (only the relevant joint idx)
- return u[self.joint_idx]
+ return self.calc_joint_angles_from_ik(target_pos=target_pos, target_quat=target_quat)
- def _calc_joint_angles_from_ik(self, target_pos, target_quat):
+ def calc_joint_angles_from_ik(self, target_pos, target_quat):
"""
Solves for joint angles given the ik target position and orientation
Note that this outputs joint angles for the entire pybullet robot body! It is the responsibility of the
- associated Robot class to filter out the redundant / non-impact joints from the computation
+ associated BaseRobot class to filter out the redundant / non-impact joints from the computation
Args:
target_pos (3-array): absolute (x, y, z) eef position command (in robot base frame)
@@ -225,35 +171,47 @@ def _calc_joint_angles_from_ik(self, target_pos, target_quat):
Returns:
n-array: corresponding joint positions to match the inputted targets
"""
+ # Update robot state
+ self.robot.calc_state()
+
# Run IK
cmd_joint_pos = np.array(
p.calculateInverseKinematics(
- bodyIndex=self.base_body_id,
- endEffectorLinkIndex=self.task_link_id,
+ bodyIndex=self.robot.robot_ids[0],
+ endEffectorLinkIndex=self.robot.eef_link_id,
targetPosition=target_pos.tolist(),
targetOrientation=target_quat.tolist(),
- lowerLimits=(self.control_limits[ControlType.POSITION][0] - self.joint_range_tolerance).tolist(),
- upperLimits=(self.control_limits[ControlType.POSITION][1] + self.joint_range_tolerance).tolist(),
- jointRanges=(
- self.control_limits[ControlType.POSITION][1]
- - self.control_limits[ControlType.POSITION][0]
- + 2 * self.joint_range_tolerance
- ).tolist(),
- restPoses=self.default_joint_pos.tolist(),
- jointDamping=self.joint_damping.tolist(),
+ lowerLimits=self.robot.lower_joint_limits.tolist(),
+ upperLimits=self.robot.upper_joint_limits.tolist(),
+ jointRanges=self.robot.joint_range.tolist(),
+ restPoses=self.robot.untucked_default_joints.tolist(),
+ jointDamping=self.robot.joint_damping.tolist(),
)
)
-
- # Optionally pass through smoothing filter for better stability
- if self.control_filter is not None:
- cmd_joint_pos = self.control_filter.estimate(cmd_joint_pos)
+ cmd_joint_pos = self.lpf.estimate(np.array(cmd_joint_pos))
return cmd_joint_pos
- @property
- def control_type(self):
- return ControlType.VELOCITY
+ def control(self, command):
+ """
+ Execute IK control, given @command.
+
+ Args:
+ command (6-array): a DELTA relative pose command defined by (dx, dy, dz, dar, dap, day)
+
+ Returns:
+ n-array: commanded joint velocities to achieve the inputted @command.
+ """
+ # First, scale command
+ command = self.scale_command(command)
+
+ # Get desired joint positions from IK solver
+ cmd_joint_pos = self.joint_positions_for_delta_command(delta=command)
+
+ # Grab the resulting error and scale it by the velocity gain
+ cmd_joint_vel = self.config["controller"]["kv_vel"] * self.get_current_error(
+ current=self.robot.joint_position, set_point=cmd_joint_pos
+ )
- @property
- def command_dim(self):
- return IK_MODE_COMMAND_DIMS[self.mode]
+ # Return these commanded velocities
+ return cmd_joint_vel
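
As a rough, stand-alone reference (not the iGibson implementation itself), the new IKController above reduces to two pieces of math: an affine rescaling of the raw command into the configured output range, and a proportional velocity term on the error between the current joint positions and the IK solution. The sketch below is NumPy-only; the helper names and the limit values are illustrative, and it uses an explicit negative sign so a positive gain converges, whereas the controller above leaves the sign to the configured kv_vel.

import numpy as np

def scale_command(command, input_min, input_max, output_min, output_max):
    # Clip the raw command, then map it affinely from the input range to the output range
    command = np.clip(command, input_min, input_max)
    scale = np.abs(output_max - output_min) / np.abs(input_max - input_min)
    input_center = (input_max + input_min) / 2.0
    output_center = (output_max + output_min) / 2.0
    return (command - input_center) * scale + output_center

def joint_velocities_from_ik_error(current_joint_pos, ik_joint_pos, kv):
    # Proportional velocity command that drives the joints toward the IK solution
    return -kv * (np.asarray(current_joint_pos) - np.asarray(ik_joint_pos))

# Example: a 6-DOF delta-pose command in [-1, 1] mapped to +/-0.2 m and +/-0.5 rad
raw = np.array([0.5, -1.0, 0.0, 0.2, 0.0, 1.0])
in_min, in_max = -np.ones(6), np.ones(6)
out_max = np.array([0.2, 0.2, 0.2, 0.5, 0.5, 0.5])
print(scale_command(raw, in_min, in_max, -out_max, out_max))  # -> [ 0.1 -0.2  0.   0.1  0.   0.5]
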
diff --git a/igibson/controllers/joint_controller.py b/igibson/controllers/joint_controller.py
deleted file mode 100644
index 5a1d5b70d..000000000
--- a/igibson/controllers/joint_controller.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import numpy as np
-
-from igibson.controllers import ControlType, LocomotionController, ManipulationController
-from igibson.utils.python_utils import assert_valid_key
-
-
-class JointController(LocomotionController, ManipulationController):
- """
- Controller class for joint control. Because pybullet can handle direct position / velocity / torque
- control signals, this is merely a pass-through operation from command to control (with clipping / scaling built in).
-
- Each controller step consists of the following:
- 1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
- 2a. If using delta commands, then adds the command to the current joint state
- 2b. Clips the resulting command by the motor limits
- """
-
- def __init__(
- self,
- control_freq,
- motor_type,
- control_limits,
- joint_idx,
- command_input_limits="default",
- command_output_limits="default",
- use_delta_commands=False,
- use_compliant_mode=True,
- ):
- """
- :param control_freq: int, controller loop frequency
- :param motor_type: str, type of motor being controlled, one of {position, velocity, torque}
- :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted
- control signal. Should specify per-actuator type limits, i.e.:
-
- "position": [[min], [max]]
- "velocity": [[min], [max]]
- "torque": [[min], [max]]
- "has_limit": [...bool...]
-
- Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
- :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring
- controller-relevant values during control computations
- :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]],
- if set, is the min/max acceptable inputted command. Values outside of this range will be clipped.
- If None, no clipping will be used. If "default", range will be set to (-1, 1)
- :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set,
- is the min/max scaled command. If both this value and @command_input_limits is not None,
- then all inputted command values will be scaled from the input range to the output range.
- If either is None, no scaling will be used. If "default", then this range will automatically be set
- to the @control_limits entry corresponding to self.control_type
- :param use_delta_commands: bool, whether inputted commands should be interpreted as delta or absolute values
- :param use_compliant_mode: bool, only relevant if @use_delta_command is True. If True, will add delta commands
- to the current observed joint states. Otherwise, will store initial references
- to these values and add the delta values to them. Note that setting this to False can only be used with
- "position" motor control, and may be useful for e.g.: setting an initial large delta value, and then sending
- subsequent zero commands, which can converge faster than sending individual small delta commands
- sequentially.
- """
- # Store arguments
- assert_valid_key(key=motor_type.lower(), valid_keys=ControlType.VALID_TYPES_STR, name="motor_type")
- self.motor_type = motor_type.lower()
- self.use_delta_commands = use_delta_commands
- self.use_compliant_mode = use_compliant_mode
-
- # If we're using compliant mode, make sure we're using joint position control (this doesn't make sense for
- # velocity or torque control)
- if not self.use_compliant_mode:
- assert self.motor_type == "position", f"If not using compliant mode, motor control type must be position!"
-
- # Other variables that will be used at runtime
- self._joint_target = None
-
- # Run super init
- super().__init__(
- control_freq=control_freq,
- control_limits=control_limits,
- joint_idx=joint_idx,
- command_input_limits=command_input_limits,
- command_output_limits=command_output_limits,
- )
-
- def reset(self):
- # Clear the target
- self._joint_target = None
-
- def _command_to_control(self, command, control_dict):
- """
- Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal
-
- :param command: Array[float], desired (already preprocessed) command to convert into control signals
- :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped
- states necessary for controller computation. Must include the following keys:
- joint_position: Array of current joint positions
- joint_velocity: Array of current joint velocities
- joint_torque: Array of current joint torques
-
- :return: Array[float], outputted (non-clipped!) control signal to deploy
- """
- # If we're using delta commands, add this value
- if self.use_delta_commands:
- if self.use_compliant_mode:
- # Add this to the current observed state
- u = control_dict["joint_{}".format(self.motor_type)][self.joint_idx] + command
- else:
- # Otherwise, we add to the internally stored target state
- # (also, initialize the target if we haven't done so already)
- if self._joint_target is None:
- self._joint_target = np.array(control_dict["joint_{}".format(self.motor_type)][self.joint_idx])
- self._joint_target = self._joint_target + command
- u = self._joint_target
- # Otherwise, control is simply the command itself
- else:
- u = command
-
- # Return control
- return u
-
- @property
- def control_type(self):
- return ControlType.get_type(type_str=self.motor_type)
-
- @property
- def command_dim(self):
- return len(self.joint_idx)
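
For readers comparing the old and new control stacks: the JointController removed above was essentially a pass-through with optional delta accumulation. The following sketch (illustrative names, NumPy only, not the removed class itself) captures the two delta modes it supported: compliant mode adds the delta to the observed joint state each step, while non-compliant mode accumulates deltas onto a stored position target.

import numpy as np

def joint_delta_to_control(command, current_joint_state, joint_target=None):
    # Compliant mode (joint_target is None): add the delta to the observed state
    if joint_target is None:
        return np.asarray(current_joint_state) + np.asarray(command), None
    # Non-compliant mode: accumulate the delta onto the stored position target
    joint_target = np.asarray(joint_target) + np.asarray(command)
    return joint_target, joint_target

control, _ = joint_delta_to_control([0.1, -0.05], [0.0, 0.5])
print(control)  # -> [0.1  0.45]
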
diff --git a/igibson/controllers/null_gripper_controller.py b/igibson/controllers/null_gripper_controller.py
deleted file mode 100644
index 4b8af716c..000000000
--- a/igibson/controllers/null_gripper_controller.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import numpy as np
-
-from igibson.controllers import ControlType, ManipulationController
-from igibson.utils.python_utils import assert_valid_key
-
-VALID_MODES = {
- "binary",
- "smooth",
- "independent",
-}
-
-
-class NullGripperController(ManipulationController):
- """
- Dummy Controller class for non-prehensile gripper control (i.e.: no control).
- This class has a zero-size command space, and returns an empty array for control
- """
-
- def __init__(
- self,
- control_freq,
- control_limits,
- command_input_limits="default",
- command_output_limits="default",
- ):
- """
- :param control_freq: int, controller loop frequency
- :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted
- control signal. Should specify per-actuator type limits, i.e.:
-
- "position": [[min], [max]]
- "velocity": [[min], [max]]
- "torque": [[min], [max]]
- "has_limit": [...bool...]
-
- Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
- :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]],
- if set, is the min/max acceptable inputted command. Values outside of this range will be clipped.
- If None, no clipping will be used. If "default", range will be set to (-1, 1)
- :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set,
- is the min/max scaled command. If both this value and @command_input_limits is not None,
- then all inputted command values will be scaled from the input range to the output range.
- If either is None, no scaling will be used. If "default", then this range will automatically be set
- to the @control_limits entry corresponding to self.control_type
- """
- # Immediately run super init
-
- # Run super init
- super().__init__(
- control_freq=control_freq,
- control_limits=control_limits,
- joint_idx=np.array([], dtype=np.int), # no joints controlled
- command_input_limits=command_input_limits,
- command_output_limits=command_output_limits,
- )
-
- def reset(self):
- # No-op
- pass
-
- def _preprocess_command(self, command):
- # No action
- return command
-
- def clip_control(self, control):
- # No action
- return control
-
- def _command_to_control(self, command, control_dict):
- """
- Converts the (already preprocessed) inputted @command into deployable (non-clipped!) gripper
- joint control signal. Since this is a null controller, command should be an empty numpy array
- and this function will equivalently return an empty array
-
- :param command: Array[float], desired (already preprocessed) command to convert into control signals.
- This should always be 2D command for each gripper joint (Empty in this case)
- :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped
- states necessary for controller computation.
-
- :return: Array[float], outputted (non-clipped!) control signal to deploy. This is an empty np.array
- """
- return np.array([])
-
- @property
- def control_type(self):
- return ControlType.POSITION
-
- @property
- def command_dim(self):
- return 0
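
The NullGripperController removed above is intentionally trivial, so a stand-alone stub (illustrative class name, not the removed class) is all it amounts to: a controller with a zero-dimensional command space that always returns an empty control array.

import numpy as np

class NullController:
    @property
    def command_dim(self):
        # No command dimensions: nothing to control
        return 0

    def control(self, command):
        assert len(command) == 0, "null controller expects an empty command"
        return np.array([])

print(NullController().control(np.array([])))  # -> []
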
diff --git a/igibson/controllers/parallel_jaw_gripper_controller.py b/igibson/controllers/parallel_jaw_gripper_controller.py
deleted file mode 100644
index 7fbe2f9f8..000000000
--- a/igibson/controllers/parallel_jaw_gripper_controller.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import numpy as np
-
-from igibson.controllers import ControlType, ManipulationController
-from igibson.utils.python_utils import assert_valid_key
-
-VALID_MODES = {
- "binary",
- "smooth",
- "independent",
-}
-
-
-class ParallelJawGripperController(ManipulationController):
- """
- Controller class for parallel jaw gripper control. This either interprets an input as a binary
- command (open / close), continuous command (open / close with scaled velocities), or per-joint continuous command
-
- Each controller step consists of the following:
- 1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
- 2a. Convert command into gripper joint control signals
- 2b. Clips the resulting control by the motor limits
- """
-
- def __init__(
- self,
- control_freq,
- motor_type,
- control_limits,
- joint_idx,
- command_input_limits="default",
- command_output_limits="default",
- mode="binary",
- limit_tolerance=0.001,
- ):
- """
- :param control_freq: int, controller loop frequency
- :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted
- control signal. Should specify per-actuator type limits, i.e.:
-
- "position": [[min], [max]]
- "velocity": [[min], [max]]
- "torque": [[min], [max]]
- "has_limit": [...bool...]
-
- Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
- :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring
- controller-relevant values during control computations
- :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]],
- if set, is the min/max acceptable inputted command. Values outside of this range will be clipped.
- If None, no clipping will be used. If "default", range will be set to (-1, 1)
- :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set,
- is the min/max scaled command. If both this value and @command_input_limits is not None,
- then all inputted command values will be scaled from the input range to the output range.
- If either is None, no scaling will be used. If "default", then this range will automatically be set
- to the @control_limits entry corresponding to self.control_type
- :param mode: str, mode for this controller. Valid options are:
-
- "binary": 1D command, if preprocessed value > 0 is interpreted as an max open
- (send max pos / vel / tor signal), otherwise send max close control signals
- "smooth": 1D command, sends symmetric signal to both finger joints equal to the preprocessed commands
- "independent": 2D command, sends independent signals to each finger joint equal to the preprocessed command
- :param limit_tolerance: float, sets the tolerance from the joint limit ends, below which controls will be zeroed
- out if the control is using velocity or torque control
- """
- # Store arguments
- assert_valid_key(key=motor_type.lower(), valid_keys=ControlType.VALID_TYPES_STR, name="motor_type")
- self.motor_type = motor_type.lower()
- assert_valid_key(key=mode, valid_keys=VALID_MODES, name="mode for parallel jaw gripper")
- self.mode = mode
- self.limit_tolerance = limit_tolerance
-
- # If we're using binary signal, we override the command output limits
- if mode == "binary":
- command_output_limits = (-1.0, 1.0)
-
- # Run super init
- super().__init__(
- control_freq=control_freq,
- control_limits=control_limits,
- joint_idx=joint_idx,
- command_input_limits=command_input_limits,
- command_output_limits=command_output_limits,
- )
-
- def reset(self):
- # No-op
- pass
-
- def _preprocess_command(self, command):
- # We extend this method to make sure command is always 2D
- if self.mode != "independent":
- command = np.array([command] * 2) if type(command) in {int, float} else np.array([command[0]] * 2)
-
- # Return from super method
- return super()._preprocess_command(command=command)
-
- def _command_to_control(self, command, control_dict):
- """
- Converts the (already preprocessed) inputted @command into deployable (non-clipped!) gripper
- joint control signal
-
- :param command: Array[float], desired (already preprocessed) command to convert into control signals.
- This should always be 2D command for each gripper joint
- :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped
- states necessary for controller computation. Must include the following keys:
- joint_position: Array of current joint positions
-
- :return: Array[float], outputted (non-clipped!) control signal to deploy
- """
- joint_pos = control_dict["joint_position"][self.joint_idx]
- # Choose what to do based on control mode
- if self.mode == "binary":
- # Use max control signal
- u = (
- self.control_limits[ControlType.POSITION][1][self.joint_idx]
- if command[0] >= 0.0
- else self.control_limits[ControlType.POSITION][0][self.joint_idx]
- )
- else:
- # Use continuous signal
- u = command
-
- # If we're near the joint limits and we're using velocity / torque control, we zero out the action
- if self.motor_type in {"velocity", "torque"}:
- violate_upper_limit = (
- joint_pos > self.control_limits[ControlType.POSITION][1][self.joint_idx] - self.limit_tolerance
- )
- violate_lower_limit = (
- joint_pos < self.control_limits[ControlType.POSITION][0][self.joint_idx] + self.limit_tolerance
- )
- violation = np.logical_or(violate_upper_limit * (u > 0), violate_lower_limit * (u < 0))
- u *= ~violation
-
- # Return control
- return u
-
- @property
- def control_type(self):
- return ControlType.get_type(type_str=self.motor_type)
-
- @property
- def command_dim(self):
- return 2 if self.mode == "independent" else 1
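
The binary mode of the ParallelJawGripperController removed above reduces to picking a maximum open/close signal from the sign of a 1D command and zeroing velocity/torque commands near the joint limits. A minimal sketch of that logic follows; the function name and the max_open/max_close placeholder values are illustrative (the original used the gripper joints' position limits as the extreme signals).

import numpy as np

def gripper_binary_control(command, joint_pos, pos_lower, pos_upper, motor_type="velocity",
                           max_close=-1.0, max_open=1.0, limit_tolerance=1e-3):
    # command[0] >= 0 means "open" (max signal), otherwise "close" (min signal)
    u = np.full_like(np.asarray(joint_pos, dtype=float), max_open if command[0] >= 0.0 else max_close)
    if motor_type in {"velocity", "torque"}:
        # Zero out the control near the joint limits so we do not fight the hard stops
        at_upper = joint_pos > pos_upper - limit_tolerance
        at_lower = joint_pos < pos_lower + limit_tolerance
        u = u * ~np.logical_or(at_upper & (u > 0), at_lower & (u < 0))
    return u

print(gripper_binary_control(np.array([0.7]), np.array([0.02, 0.02]),
                             pos_lower=np.zeros(2), pos_upper=np.full(2, 0.05)))  # -> [1. 1.]
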
diff --git a/igibson/envs/behavior_env.py b/igibson/envs/behavior_env.py
new file mode 100644
index 000000000..0775385a0
--- /dev/null
+++ b/igibson/envs/behavior_env.py
@@ -0,0 +1,412 @@
+import argparse
+import datetime
+import os
+import time
+from collections import OrderedDict
+
+import bddl
+import gym.spaces
+import numpy as np
+from bddl.condition_evaluation import evaluate_state
+
+from igibson.activity.activity_base import iGBEHAVIORActivityInstance
+from igibson.envs.igibson_env import iGibsonEnv
+from igibson.robots.behavior_robot import BehaviorRobot
+from igibson.robots.fetch_gripper_robot import FetchGripper
+from igibson.utils.checkpoint_utils import load_checkpoint
+from igibson.utils.ig_logging import IGLogWriter
+from igibson.objects.articulated_object import URDFObject
+from igibson.object_states.on_floor import RoomFloor
+
+
+class BehaviorEnv(iGibsonEnv):
+ """
+ BEHAVIOR Environment (OpenAI Gym interface), built on top of iGibsonEnv
+ """
+
+ def __init__(
+ self,
+ config_file,
+ scene_id=None,
+ mode="headless",
+ action_timestep=1 / 30.0,
+ physics_timestep=1 / 300.0,
+ device_idx=0,
+ render_to_tensor=False,
+ automatic_reset=False,
+ seed=0,
+ action_filter="mobile_manipulation",
+ instance_id=0,
+ episode_save_dir=None,
+ rng=None
+ ):
+ """
+ :param config_file: config_file path
+ :param scene_id: override scene_id in config file
+ :param mode: headless, gui, iggui
+ :param action_timestep: environment executes action per action_timestep second
+ :param physics_timestep: physics timestep for pybullet
+ :param device_idx: which GPU to run the simulation and rendering on
+ :param render_to_tensor: whether to render directly to pytorch tensors
+ :param automatic_reset: whether to automatically reset after an episode finishes
+ :param seed: seed for the random number generator (ignored if @rng is provided)
+ :param action_filter: one of "navigation", "tabletop_manipulation", "mobile_manipulation" or "all"; restricts the action space
+ :param instance_id: which cached fixed-furniture scene instance to load
+ :param episode_save_dir: if not None, directory where per-episode HDF5 logs are written
+ :param rng: optional numpy random generator to use instead of creating one from @seed
+ """
+ self.action_filter = action_filter
+ self.instance_id = instance_id
+ super(BehaviorEnv, self).__init__(
+ config_file=config_file,
+ scene_id=scene_id,
+ mode=mode,
+ action_timestep=action_timestep,
+ physics_timestep=physics_timestep,
+ device_idx=device_idx,
+ render_to_tensor=render_to_tensor,
+ )
+ if rng is None:
+ self.rng = np.random.default_rng(seed=seed)
+ else:
+ self.rng = rng
+ self.automatic_reset = automatic_reset
+ self.reward_potential = None
+ self.episode_save_dir = episode_save_dir
+ if self.episode_save_dir is not None:
+ os.makedirs(self.episode_save_dir, exist_ok=True)
+ self.task_relevant_objects = [
+ item
+ for item in self.task.object_scope.values()
+ if isinstance(item, URDFObject) or isinstance(item, RoomFloor)
+ ]
+ self.num_objects = len(self.task_relevant_objects)
+
+ self.log_writer = None
+
+ # Set iggui viewing position to start at the robot's initial position.
+ robot_init_pos = self.simulator.robots[0].get_position()
+ if self.simulator.viewer is not None:
+ self.simulator.viewer.px = robot_init_pos[0]
+ self.simulator.viewer.py = robot_init_pos[1]
+ self.simulator.viewer.pz = robot_init_pos[2]
+
+ # Make sure different parallel environments will have different random seeds
+ # np.random.seed(os.getpid())
+ # np.random.seed(0) # For now, force this to be 0 so we have reproducible behavior!
+
+ def load_action_space(self):
+ """
+ Load action space
+ """
+ if isinstance(self.robots[0], BehaviorRobot):
+ if self.action_filter == "navigation":
+ self.action_space = gym.spaces.Box(shape=(3,), low=-1.0, high=1.0, dtype=np.float32)
+ elif self.action_filter == "mobile_manipulation":
+ self.action_space = gym.spaces.Box(shape=(17,), low=-1.0, high=1.0, dtype=np.float32)
+ elif self.action_filter == "tabletop_manipulation":
+ self.action_space = gym.spaces.Box(shape=(7,), low=-1.0, high=1.0, dtype=np.float32)
+ else:
+ self.action_space = gym.spaces.Box(shape=(26,), low=-1.0, high=1.0, dtype=np.float32)
+ elif isinstance(self.robots[0], FetchGripper):
+ if self.action_filter == "navigation":
+ self.action_space = gym.spaces.Box(shape=(2,), low=-1.0, high=1.0, dtype=np.float32)
+ elif self.action_filter == "tabletop_manipulation":
+ self.action_space = gym.spaces.Box(shape=(7,), low=-1.0, high=1.0, dtype=np.float32)
+ else:
+ self.action_space = gym.spaces.Box(shape=(11,), low=-1.0, high=1.0, dtype=np.float32)
+ else:
+ Exception("Only BehaviorRobot and FetchGripper are supported for behavior_env")
+
+ def load_behavior_task_setup(self):
+ """
+ Load task setup
+ """
+ # task
+ task = self.config["task"]
+ task_id = self.config["task_id"]
+ scene_id = self.config["scene_id"]
+ clutter = self.config["clutter"]
+ online_sampling = self.config["online_sampling"]
+ # if online_sampling:
+ # scene_kwargs = {}
+ # else:
+ scene_kwargs = {
+ "urdf_file": "{}_task_{}_{}_{}_fixed_furniture".format(scene_id, task, task_id, self.instance_id),
+ }
+ bddl.set_backend("iGibson")
+ robot_class = self.config.get("robot")
+ if robot_class == "BehaviorRobot":
+ robot_type = BehaviorRobot
+ elif robot_class == "FetchGripper":
+ robot_type = FetchGripper
+ else:
+ Exception("Only BehaviorRobot and FetchGripper are supported for behavior_env")
+
+ self.task = iGBEHAVIORActivityInstance(task, task_id, robot_type=robot_type, robot_config=self.config)
+ self.task.initialize_simulator(
+ simulator=self.simulator,
+ scene_id=scene_id,
+ load_clutter=clutter,
+ scene_kwargs=scene_kwargs,
+ online_sampling=online_sampling,
+ )
+
+ for _, obj in self.task.object_scope.items():
+ if obj.category in ["agent", "room_floor"]:
+ continue
+ obj.highlight()
+
+ self.scene = self.task.scene
+ self.robots = self.simulator.robots
+
+ self.reset_checkpoint_idx = self.config.get("reset_checkpoint_idx", -1)
+ self.reset_checkpoint_dir = self.config.get("reset_checkpoint_dir", None)
+
+ def load_task_setup(self):
+ self.initial_pos_z_offset = self.config.get("initial_pos_z_offset", 0.1)
+ # s = 0.5 * G * (t ** 2)
+ drop_distance = 0.5 * 9.8 * (self.action_timestep ** 2)
+ assert drop_distance < self.initial_pos_z_offset, "initial_pos_z_offset is too small for collision checking"
+
+ # ignore the agent's collision with these body ids
+ self.collision_ignore_body_b_ids = set(self.config.get("collision_ignore_body_b_ids", []))
+ # ignore the agent's collision with these link ids of itself
+ self.collision_ignore_link_a_ids = set(self.config.get("collision_ignore_link_a_ids", []))
+
+ # discount factor
+ self.discount_factor = self.config.get("discount_factor", 0.99)
+
+ # domain randomization frequency
+ self.texture_randomization_freq = self.config.get("texture_randomization_freq", None)
+ self.object_randomization_freq = self.config.get("object_randomization_freq", None)
+
+ self.load_behavior_task_setup()
+
+ # Activate the robot constraints so that we don't need to feed in
+ # trigger press action in the first couple frames
+ if isinstance(self.robots[0], BehaviorRobot):
+ self.robots[0].activate()
+
+ def load(self):
+ """
+ Load environment
+ """
+ self.load_task_setup()
+ self.load_observation_space()
+ self.load_action_space()
+ self.load_miscellaneous_variables()
+
+ def load_observation_space(self):
+ super(BehaviorEnv, self).load_observation_space()
+ if "proprioception" in self.output:
+ proprioception_dim = self.robots[0].get_proprioception_dim()
+ self.observation_space.spaces["proprioception"] = gym.spaces.Box(
+ low=-np.inf, high=np.inf, shape=(proprioception_dim,)
+ )
+ self.observation_space = gym.spaces.Dict(self.observation_space.spaces)
+
+ def step(self, action, save_video=False, task_name=""):
+ """
+ Apply robot's action.
+ Returns the next state, reward, done and info,
+ following OpenAI Gym's convention
+
+ :param action: robot actions
+ :return: state: next observation
+ :return: reward: reward of this time step
+ :return: done: whether the episode is terminated
+ :return: info: info dictionary with any useful information
+ """
+ self.current_step += 1
+
+ if isinstance(self.robots[0], BehaviorRobot):
+ new_action = np.zeros((28,))
+ if self.action_filter == "navigation":
+ new_action[:2] = action[:2]
+ new_action[5] = action[2]
+ elif self.action_filter == "mobile_manipulation":
+ # body x,y,yaw
+ new_action[:2] = action[:2]
+ new_action[5] = action[2]
+ # left hand 7d
+ new_action[12:19] = action[3:10]
+ # right hand 7d
+ new_action[20:27] = action[10:17]
+ elif self.action_filter == "tabletop_manipulation":
+ # only using right hand
+ new_action[20:27] = action[:7]
+ else:
+ # all action dims except hand reset
+ new_action[:19] = action[:19]
+ new_action[20:27] = action[19:]
+ # The original action space for BehaviorRobot is too wide for random exploration
+ # new_action *= 0.05
+ elif isinstance(self.robots[0], FetchGripper):
+ new_action = np.zeros((11,))
+ if self.action_filter == "navigation":
+ new_action[:2] = action[:2]
+ elif self.action_filter == "tabletop_manipulation":
+ new_action[4:] = action[:7]
+ else:
+ new_action = action
+ else:
+ Exception("Only BehaviorRobot and FetchGripper are supported for behavior_env")
+
+ self.robots[0].apply_action(new_action)
+ if self.log_writer is not None:
+ self.log_writer.process_frame()
+ self.simulator.step(save_video=save_video, task_name=task_name)
+
+ state = self.get_state()
+ info = {}
+ done, satisfied_predicates = self.task.check_success()
+ # Compute the initial reward potential here instead of during reset
+ # because if an intermediate checkpoint is loaded, we need to step the
+ # simulator before calling task.check_success
+ if self.current_step == 1:
+ self.reward_potential = self.get_potential(satisfied_predicates)
+
+ if self.current_step >= self.config["max_step"]:
+ done = True
+ reward, info = self.get_reward(satisfied_predicates)
+
+ self.populate_info(info)
+
+ if done and self.automatic_reset:
+ info["last_observation"] = state
+ state = self.reset()
+
+ return state, reward, done, info
+
+ def get_potential(self, satisfied_predicates):
+ potential = 0.0
+
+ # Evaluate the first ground goal state option as the potential
+ _, satisfied_predicates = evaluate_state(self.task.ground_goal_state_options[0])
+ success_score = len(satisfied_predicates["satisfied"]) / (
+ len(satisfied_predicates["satisfied"]) + len(satisfied_predicates["unsatisfied"])
+ )
+ predicate_potential = success_score
+ potential += predicate_potential
+
+ return potential
+
+ def get_reward(self, satisfied_predicates):
+ new_potential = self.get_potential(satisfied_predicates)
+ reward = new_potential - self.reward_potential
+ self.reward_potential = new_potential
+ return reward, {"satisfied_predicates": satisfied_predicates}
+
+ def get_state(self, collision_links=[]):
+ """
+ Get the current observation
+
+ :param collision_links: collisions from last physics timestep
+ :return: observation as a dictionary
+ """
+ state = OrderedDict()
+ if "task_obs" in self.output:
+ state["task_obs"] = self.task.get_task_obs(self)
+ if "vision" in self.sensors:
+ vision_obs = self.sensors["vision"].get_obs(self)
+ for modality in vision_obs:
+ state[modality] = vision_obs[modality]
+ if "scan_occ" in self.sensors:
+ scan_obs = self.sensors["scan_occ"].get_obs(self)
+ for modality in scan_obs:
+ state[modality] = scan_obs[modality]
+ if "bump" in self.sensors:
+ state["bump"] = self.sensors["bump"].get_obs(self)
+
+ if "proprioception" in self.output:
+ state["proprioception"] = np.array(self.robots[0].get_proprioception())
+
+ return state
+
+ def reset_scene_and_agent(self):
+ if self.reset_checkpoint_dir is not None and self.reset_checkpoint_idx != -1:
+ load_checkpoint(self.simulator, self.reset_checkpoint_dir, self.reset_checkpoint_idx)
+ else:
+ self.task.reset_scene(snapshot_id=self.task.initial_state)
+ # set the constraints to the current poses
+ self.robots[0].apply_action(np.zeros(self.robots[0].action_dim))
+
+ def reset(self):
+ """
+ Reset episode
+ """
+ # if self.log_writer is not None, save previous episode
+ if self.log_writer is not None:
+ self.log_writer.end_log_session()
+ del self.log_writer
+ self.log_writer = None
+
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+ task = self.config["task"]
+ task_id = self.config["task_id"]
+ scene = self.config["scene_id"]
+ if self.episode_save_dir:
+ vr_log_path = os.path.join(
+ self.episode_save_dir,
+ "{}_{}_{}_{}_{}.hdf5".format(task, task_id, scene, timestamp, self.current_episode),
+ )
+ self.log_writer = IGLogWriter(
+ self.simulator,
+ frames_before_write=200,
+ log_filepath=vr_log_path,
+ task=self.task,
+ store_vr=False,
+ vr_robot=self.robots[0],
+ filter_objects=True,
+ )
+ self.log_writer.set_up_data_storage()
+
+ self.reset_scene_and_agent()
+
+ self.simulator.sync(force_sync=True)
+ state = self.get_state()
+ self.reset_variables()
+
+ return state
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--config",
+ "-c",
+ default="igibson/examples/configs/behavior_onboard_sensing_fetch.yaml",
+ help="which config file to use [default: use yaml files in examples/configs]",
+ )
+ parser.add_argument(
+ "--mode",
+ "-m",
+ choices=["headless", "gui", "iggui", "pbgui"],
+ default="iggui",
+ help="which mode for simulation (default: headless)",
+ )
+ parser.add_argument(
+ "--action_filter",
+ "-af",
+ choices=["navigation", "tabletop_manipulation", "mobile_manipulation", "all"],
+ default="mobile_manipulation",
+ help="which action filter",
+ )
+ args = parser.parse_args()
+
+ env = BehaviorEnv(
+ config_file=args.config,
+ mode=args.mode,
+ action_timestep=1.0 / 30.0,
+ physics_timestep=1.0 / 300.0,
+ action_filter=args.action_filter,
+ episode_save_dir=None,
+ )
+ step_time_list = []
+ for episode in range(100):
+ print("Episode: {}".format(episode))
+ start = time.time()
+ env.reset()
+ for i in range(1000): # ~33 seconds at 30 Hz
+ action = env.action_space.sample()
+ state, reward, done, _ = env.step(action)
+ if done:
+ break
+ print("Episode finished after {} timesteps, took {} seconds.".format(env.current_step, time.time() - start))
+ env.close()
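
The reward in BehaviorEnv above is potential-based shaping: the potential is the fraction of satisfied goal predicates for the first ground goal option, and the per-step reward is the change in that potential. The snippet below is a small self-contained sketch of that scheme; the names are illustrative, and the real environment obtains the predicate counts from bddl's evaluate_state().

def predicate_potential(num_satisfied, num_unsatisfied):
    # Fraction of goal predicates currently satisfied
    total = num_satisfied + num_unsatisfied
    return num_satisfied / total if total > 0 else 0.0

class PotentialReward:
    def __init__(self, initial_potential=0.0):
        self.potential = initial_potential

    def step(self, num_satisfied, num_unsatisfied):
        # Reward is the change in potential since the previous step
        new_potential = predicate_potential(num_satisfied, num_unsatisfied)
        reward = new_potential - self.potential
        self.potential = new_potential
        return reward

shaping = PotentialReward(initial_potential=predicate_potential(1, 3))
print(shaping.step(2, 2))  # -> 0.25 (one more goal predicate satisfied)
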
diff --git a/igibson/envs/behavior_mp_env.py b/igibson/envs/behavior_mp_env.py
index 883e9f62f..f046b2c57 100644
--- a/igibson/envs/behavior_mp_env.py
+++ b/igibson/envs/behavior_mp_env.py
@@ -1,4 +1,5 @@
import argparse
+import logging
import time
from enum import IntEnum
@@ -14,19 +15,17 @@
from igibson.objects.articulated_object import URDFObject
from igibson.robots.behavior_robot import BRBody, BREye, BRHand
from igibson.utils.behavior_robot_planning_utils import dry_run_base_plan, plan_base_motion_br, plan_hand_motion_br
-from igibson.utils.utils import restoreState
+
+NUM_ACTIONS = 6
class ActionPrimitives(IntEnum):
NAVIGATE_TO = 0
- LEFT_GRASP = 1
- RIGHT_GRASP = 2
- LEFT_PLACE_ONTOP = 3
- RIGHT_PLACE_ONTOP = 4
- LEFT_PLACE_INSIDE = 5
- RIGHT_PLACE_INSIDE = 6
- OPEN = 7
- CLOSE = 8
+ GRASP = 1
+ PLACE_ONTOP = 2
+ PLACE_INSIDE = 3
+ OPEN = 4
+ CLOSE = 5
def get_aabb_volume(lo, hi):
@@ -47,12 +46,11 @@ def detect_collision(bodyA, object_in_hand=None):
def detect_robot_collision(robot):
- left_object_in_hand = robot.links["left_hand"].object_in_hand
- right_object_in_hand = robot.links["right_hand"].object_in_hand
+ object_in_hand = robot.parts["right_hand"].object_in_hand
return (
- detect_collision(robot.links["body"].get_body_id())
- or detect_collision(robot.links["left_hand"].get_body_id(), left_object_in_hand)
- or detect_collision(robot.links["right_hand"].get_body_id(), right_object_in_hand)
+ detect_collision(robot.parts["body"].body_id)
+ or detect_collision(robot.parts["left_hand"].body_id)
+ or detect_collision(robot.parts["right_hand"].body_id, object_in_hand)
)
@@ -69,24 +67,21 @@ def __init__(
action_timestep=1 / 10.0,
physics_timestep=1 / 240.0,
device_idx=0,
+ render_to_tensor=False,
automatic_reset=False,
seed=0,
action_filter="mobile_manipulation",
use_motion_planning=False,
- activity_relevant_objects_only=True,
):
"""
- @param config_file: config_file path
- @param scene_id: override scene_id in config file
- :param mode: headless, headless_tensor, gui_interactive, gui_non_interactive
- @param action_timestep: environment executes action per action_timestep second
- @param physics_timestep: physics timestep for pybullet
- @param device_idx: which GPU to run the simulation and rendering on
- @param automatic_reset: whether to automatic reset after an episode finishes
- @param seed: RNG seed for sampling
- @param action_filter: see BehaviorEnv
- @param use_motion_planning: Whether motion-planned primitives or magic primitives should be used
- @param activity_relevant_objects_only: Whether the actions should be parameterized by AROs or all scene objs.
+ :param config_file: config_file path
+ :param scene_id: override scene_id in config file
+ :param mode: headless, gui, iggui
+ :param action_timestep: environment executes action per action_timestep second
+ :param physics_timestep: physics timestep for pybullet
+ :param device_idx: which GPU to run the simulation and rendering on
+ :param render_to_tensor: whether to render directly to pytorch tensors
+ :param automatic_reset: whether to automatically reset after an episode finishes
"""
super(BehaviorMPEnv, self).__init__(
config_file=config_file,
@@ -95,29 +90,24 @@ def __init__(
action_timestep=action_timestep,
physics_timestep=physics_timestep,
device_idx=device_idx,
+ render_to_tensor=render_to_tensor,
action_filter=action_filter,
seed=seed,
automatic_reset=automatic_reset,
)
+ self.obj_in_hand = None
self.use_motion_planning = use_motion_planning
- self.activity_relevant_objects_only = activity_relevant_objects_only
self.robots[0].initial_z_offset = 0.7
def load_action_space(self):
- if self.activity_relevant_objects_only:
- self.addressable_objects = [
- item
- for item in self.task.object_scope.values()
- if isinstance(item, URDFObject) or isinstance(item, RoomFloor)
- ]
- else:
- self.addressable_objects = list(
- set(self.task.simulator.scene.objects_by_name.values()) | set(self.task.object_scope.values())
- )
-
- self.num_objects = len(self.addressable_objects)
- self.action_space = gym.spaces.Discrete(self.num_objects * len(ActionPrimitives))
+ self.task_relevant_objects = [
+ item
+ for item in self.task.object_scope.values()
+ if isinstance(item, URDFObject) or isinstance(item, RoomFloor)
+ ]
+ self.num_objects = len(self.task_relevant_objects)
+ self.action_space = gym.spaces.Discrete(self.num_objects * NUM_ACTIONS)
def get_body_ids(self, include_self=False):
ids = []
@@ -126,158 +116,356 @@ def get_body_ids(self, include_self=False):
ids.extend(object.body_ids)
if include_self:
- ids.append(self.robots[0].links["left_hand"].get_body_id())
- ids.append(self.robots[0].links["body"].get_body_id())
+ ids.append(self.robots[0].parts["left_hand"].get_body_id())
+ ids.append(self.robots[0].parts["body"].get_body_id())
return ids
- def reset_and_release_hand(self, hand):
+ def reset_and_release_hand(self):
self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation())
for _ in range(100):
- self.robots[0].links[hand].set_close_fraction(0)
- self.robots[0].links[hand].trigger_fraction = 0
+ self.robots[0].parts["right_hand"].set_close_fraction(0)
+ self.robots[0].parts["right_hand"].trigger_fraction = 0
p.stepSimulation()
def step(self, action):
obj_list_id = int(action) % self.num_objects
action_primitive = int(action) // self.num_objects
- obj = self.addressable_objects[obj_list_id]
+ obj = self.task_relevant_objects[obj_list_id]
if not (isinstance(obj, BRBody) or isinstance(obj, BRHand) or isinstance(obj, BREye)):
if action_primitive == ActionPrimitives.NAVIGATE_TO:
- if self.navigate_to_obj(obj):
- print("PRIMITIVE: navigate to {} success".format(obj.name))
+ if self.navigate_to_obj(obj, use_motion_planning=self.use_motion_planning):
+ logging.debug("PRIMITIVE: navigate to {} success".format(obj.name))
else:
- print("PRIMITIVE: navigate to {} fail".format(obj.name))
+ logging.debug("PRIMITIVE: navigate to {} fail".format(obj.name))
- elif action_primitive == ActionPrimitives.RIGHT_GRASP or action_primitive == ActionPrimitives.LEFT_GRASP:
- hand = "right_hand" if action_primitive == ActionPrimitives.RIGHT_GRASP else "left_hand"
- obj_in_hand_id = self.robots[0].links[hand].object_in_hand
- obj_in_hand = self.scene.objects_by_id[obj_in_hand_id] if obj_in_hand_id is not None else None
- if obj_in_hand is None:
+ elif action_primitive == ActionPrimitives.GRASP:
+ if self.obj_in_hand is None:
if isinstance(obj, URDFObject) and hasattr(obj, "states") and object_states.AABB in obj.states:
lo, hi = obj.states[object_states.AABB].get_value()
volume = get_aabb_volume(lo, hi)
- if volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed: # we can only grasp small objects
- self.navigate_if_needed(obj)
- self.grasp_obj(obj, hand)
- obj_in_hand_id = self.robots[0].links[hand].object_in_hand
- obj_in_hand = (
- self.scene.objects_by_id[obj_in_hand_id] if obj_in_hand_id is not None else None
- )
- print("PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, obj_in_hand))
+ if (
+ volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed
+ ): # say we can only grasp small objects
+ if (
+ np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position()))
+ < 2
+ ):
+ self.grasp_obj(obj, use_motion_planning=self.use_motion_planning)
+ logging.debug(
+ "PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, self.obj_in_hand)
+ )
+ else:
+ logging.debug("PRIMITIVE: grasp {} fail, too far".format(obj.name))
else:
- print("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name))
- elif (
- action_primitive == ActionPrimitives.LEFT_PLACE_ONTOP
- or action_primitive == ActionPrimitives.RIGHT_PLACE_ONTOP
- ):
- hand = "right_hand" if action_primitive == ActionPrimitives.RIGHT_PLACE_ONTOP else "left_hand"
- obj_in_hand_id = self.robots[0].links[hand].object_in_hand
- obj_in_hand = self.scene.objects_by_id[obj_in_hand_id] if obj_in_hand_id is not None else None
- if obj_in_hand is not None and obj_in_hand != obj:
- print("PRIMITIVE:attempt to place {} ontop {}".format(obj_in_hand.name, obj.name))
+ logging.debug("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name))
+ elif action_primitive == ActionPrimitives.PLACE_ONTOP:
+ if self.obj_in_hand is not None and self.obj_in_hand != obj:
+ logging.debug("PRIMITIVE:attempt to place {} ontop {}".format(self.obj_in_hand.name, obj.name))
if isinstance(obj, URDFObject):
- self.navigate_if_needed(obj)
+ if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ state = p.saveState()
+ result = sample_kinematics(
+ "onTop",
+ self.obj_in_hand,
+ obj,
+ True,
+ use_ray_casting_method=True,
+ max_trials=20,
+ skip_falling=True,
+ )
+ if result:
+ logging.debug(
+ "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name)
+ )
+ pos = self.obj_in_hand.get_position()
+ orn = self.obj_in_hand.get_orientation()
+ self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ else:
+ p.removeState(state)
+ logging.debug(
+ "PRIMITIVE: place {} ontop {} fail, sampling fail".format(
+ self.obj_in_hand.name, obj.name
+ )
+ )
+ else:
+ logging.debug(
+ "PRIMITIVE: place {} ontop {} fail, too far".format(self.obj_in_hand.name, obj.name)
+ )
+ else:
state = p.saveState()
result = sample_kinematics(
- "onTop",
- obj_in_hand,
+ "onFloor",
+ self.obj_in_hand,
obj,
True,
use_ray_casting_method=True,
max_trials=20,
+ skip_falling=True,
)
-
if result:
- pos = obj_in_hand.get_position()
- orn = obj_in_hand.get_orientation()
- self.place_obj(state, pos, orn, hand)
- print("PRIMITIVE: place {} ontop {} success".format(obj_in_hand.name, obj.name))
+ logging.debug(
+ "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name)
+ )
+ pos = self.obj_in_hand.get_position()
+ orn = self.obj_in_hand.get_orientation()
+ self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
else:
+ logging.debug(
+ "PRIMITIVE: place {} ontop {} fail, sampling fail".format(
+ self.obj_in_hand.name, obj.name
+ )
+ )
p.removeState(state)
- print("PRIMITIVE: place {} ontop {} fail, sampling fail".format(obj_in_hand.name, obj.name))
+
+ elif action_primitive == ActionPrimitives.PLACE_INSIDE:
+ if self.obj_in_hand is not None and self.obj_in_hand != obj and isinstance(obj, URDFObject):
+ logging.debug("PRIMITIVE:attempt to place {} inside {}".format(self.obj_in_hand.name, obj.name))
+ if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ if (
+ hasattr(obj, "states")
+ and object_states.Open in obj.states
+ and obj.states[object_states.Open].get_value()
+ ) or (hasattr(obj, "states") and not object_states.Open in obj.states):
+ state = p.saveState()
+ result = sample_kinematics(
+ "inside",
+ self.obj_in_hand,
+ obj,
+ True,
+ use_ray_casting_method=True,
+ max_trials=20,
+ skip_falling=True,
+ )
+ if result:
+ logging.debug(
+ "PRIMITIVE: place {} inside {} success".format(self.obj_in_hand.name, obj.name)
+ )
+ pos = self.obj_in_hand.get_position()
+ orn = self.obj_in_hand.get_orientation()
+ self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ else:
+ logging.debug(
+ "PRIMITIVE: place {} inside {} fail, sampling fail".format(
+ self.obj_in_hand.name, obj.name
+ )
+ )
+ p.removeState(state)
+ else:
+ logging.debug(
+ "PRIMITIVE: place {} inside {} fail, need open not open".format(
+ self.obj_in_hand.name, obj.name
+ )
+ )
else:
- state = p.saveState()
- result = sample_kinematics(
- "onFloor", obj_in_hand, obj, True, use_ray_casting_method=True, max_trials=20
+ logging.debug(
+ "PRIMITIVE: place {} inside {} fail, too far".format(self.obj_in_hand.name, obj.name)
)
- if result:
- print("PRIMITIVE: place {} ontop {} success".format(obj_in_hand.name, obj.name))
- pos = obj_in_hand.get_position()
- orn = obj_in_hand.get_orientation()
- self.place_obj(state, pos, orn, hand)
+ elif action_primitive == ActionPrimitives.OPEN:
+ if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ if hasattr(obj, "states") and object_states.Open in obj.states:
+ obj.states[object_states.Open].set_value(True)
+ else:
+ logging.debug("PRIMITIVE open failed, cannot be opened")
+ else:
+ logging.debug("PRIMITIVE open failed, too far")
+
+ elif action_primitive == ActionPrimitives.CLOSE:
+ if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ if hasattr(obj, "states") and object_states.Open in obj.states:
+ obj.states[object_states.Open].set_value(False)
+ else:
+ logging.debug("PRIMITIVE close failed, cannot be opened")
+ else:
+ logging.debug("PRIMITIVE close failed, too far")
+
+ state, reward, done, info = super(BehaviorMPEnv, self).step(np.zeros(17))
+        logging.debug("PRIMITIVE satisfied predicates: %s", info["satisfied_predicates"])
+ return state, reward, done, info
+
+ def step_with_exec_info(self, action):
+ """
+ Same as the above step method, but returns an additional param (action_exec_status)
+ that is True if the high-level action execution succeeded and False otherwise
+ """
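+        # Decoding example (illustrative): with self.num_objects == 10, action 14 maps to
+        # primitive 14 // 10 == 1 applied to task-relevant object 14 % 10 == 4; the extra
+        # action_exec_status flag is appended to the usual (state, reward, done, info) tuple.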
+ obj_list_id = int(action) % self.num_objects
+ action_primitive = int(action) // self.num_objects
+ action_exec_status = False
+
+ obj = self.task_relevant_objects[obj_list_id]
+ if not (isinstance(obj, BRBody) or isinstance(obj, BRHand) or isinstance(obj, BREye)):
+ if action_primitive == ActionPrimitives.NAVIGATE_TO:
+ if self.navigate_to_obj(obj, use_motion_planning=self.use_motion_planning):
+ print("PRIMITIVE: navigate to {} success".format(obj.name))
+ action_exec_status = True
+ else:
+ print("PRIMITIVE: navigate to {} fail".format(obj.name))
+
+ elif action_primitive == ActionPrimitives.GRASP:
+ if self.obj_in_hand is None:
+ if isinstance(obj, URDFObject) and hasattr(obj, "states") and object_states.AABB in obj.states:
+ lo, hi = obj.states[object_states.AABB].get_value()
+ volume = get_aabb_volume(lo, hi)
+ if (
+ volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed
+ ): # say we can only grasp small objects
+ if (
+ np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position()))
+ < 2
+ ):
+ self.grasp_obj(obj, use_motion_planning=self.use_motion_planning)
+ print(
+ "PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, self.obj_in_hand)
+ )
+ action_exec_status = True
+ else:
+ print("PRIMITIVE: grasp {} fail, too far".format(obj.name))
else:
- print("PRIMITIVE: place {} ontop {} fail, sampling fail".format(obj_in_hand.name, obj.name))
- p.removeState(state)
+ print("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name))
+ elif action_primitive == ActionPrimitives.PLACE_ONTOP:
+ if self.obj_in_hand is not None and self.obj_in_hand != obj:
+                    print("PRIMITIVE: attempt to place {} ontop {}".format(self.obj_in_hand.name, obj.name))
- elif (
- action_primitive == ActionPrimitives.LEFT_PLACE_INSIDE
- or action_primitive == ActionPrimitives.RIGHT_PLACE_INSIDE
- ):
- hand = "right_hand" if action_primitive == ActionPrimitives.RIGHT_PLACE_INSIDE else "left_hand"
- obj_in_hand_id = self.robots[0].links[hand].object_in_hand
- obj_in_hand = self.scene.objects_by_id[obj_in_hand_id] if obj_in_hand_id is not None else None
- if obj_in_hand is not None and obj_in_hand != obj and isinstance(obj, URDFObject):
- print("PRIMITIVE:attempt to place {} inside {}".format(obj_in_hand.name, obj.name))
- if (
- hasattr(obj, "states")
- and object_states.Open in obj.states
- and obj.states[object_states.Open].get_value()
- ) or (hasattr(obj, "states") and not object_states.Open in obj.states):
- self.navigate_if_needed(obj)
+ if isinstance(obj, URDFObject):
+ if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ state = p.saveState()
+ result = sample_kinematics(
+ "onTop",
+ self.obj_in_hand,
+ obj,
+ True,
+ use_ray_casting_method=True,
+ max_trials=20,
+ skip_falling=True,
+ )
+ if result:
+ print(
+ "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name)
+ )
+ action_exec_status = True
+ pos = self.obj_in_hand.get_position()
+ orn = self.obj_in_hand.get_orientation()
+ self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ else:
+ p.removeState(state)
+ print(
+ "PRIMITIVE: place {} ontop {} fail, sampling fail".format(
+ self.obj_in_hand.name, obj.name
+ )
+ )
+ else:
+ print(
+ "PRIMITIVE: place {} ontop {} fail, too far".format(self.obj_in_hand.name, obj.name)
+ )
+ else:
state = p.saveState()
result = sample_kinematics(
- "inside",
- obj_in_hand,
+ "onFloor",
+ self.obj_in_hand,
obj,
True,
use_ray_casting_method=True,
max_trials=20,
+ skip_falling=True,
)
-
if result:
- pos = obj_in_hand.get_position()
- orn = obj_in_hand.get_orientation()
- self.place_obj(state, pos, orn, hand)
- print("PRIMITIVE: place {} inside {} success".format(obj_in_hand.name, obj.name))
+ print(
+ "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name)
+ )
+ action_exec_status = True
+ pos = self.obj_in_hand.get_position()
+ orn = self.obj_in_hand.get_orientation()
+ self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
else:
print(
- "PRIMITIVE: place {} inside {} fail, sampling fail".format(obj_in_hand.name, obj.name)
+ "PRIMITIVE: place {} ontop {} fail, sampling fail".format(
+ self.obj_in_hand.name, obj.name
+ )
)
p.removeState(state)
+
+ elif action_primitive == ActionPrimitives.PLACE_INSIDE:
+ if self.obj_in_hand is not None and self.obj_in_hand != obj and isinstance(obj, URDFObject):
+                print("PRIMITIVE: attempt to place {} inside {}".format(self.obj_in_hand.name, obj.name))
+ if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ if (
+ hasattr(obj, "states")
+ and object_states.Open in obj.states
+ and obj.states[object_states.Open].get_value()
+ ) or (hasattr(obj, "states") and not object_states.Open in obj.states):
+ state = p.saveState()
+ result = sample_kinematics(
+ "inside",
+ self.obj_in_hand,
+ obj,
+ True,
+ use_ray_casting_method=True,
+ max_trials=20,
+ skip_falling=True,
+ )
+ if result:
+ print(
+ "PRIMITIVE: place {} inside {} success".format(self.obj_in_hand.name, obj.name)
+ )
+ action_exec_status = True
+ pos = self.obj_in_hand.get_position()
+ orn = self.obj_in_hand.get_orientation()
+ self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ else:
+ print(
+ "PRIMITIVE: place {} inside {} fail, sampling fail".format(
+ self.obj_in_hand.name, obj.name
+ )
+ )
+ p.removeState(state)
+ else:
+ print(
+                            "PRIMITIVE: place {} inside {} fail, container must be open but is not".format(
+ self.obj_in_hand.name, obj.name
+ )
+ )
else:
print(
- "PRIMITIVE: place {} inside {} fail, need open not open".format(obj_in_hand.name, obj.name)
+ "PRIMITIVE: place {} inside {} fail, too far".format(self.obj_in_hand.name, obj.name)
)
elif action_primitive == ActionPrimitives.OPEN:
- self.navigate_if_needed(obj)
-
- if hasattr(obj, "states") and object_states.Open in obj.states:
- obj.states[object_states.Open].set_value(True, fully=True)
+ if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ if hasattr(obj, "states") and object_states.Open in obj.states:
+ action_exec_status = True
+ print("PRIMITIVE open succeeded")
+ obj.states[object_states.Open].set_value(True)
+ else:
+ print("PRIMITIVE open failed, cannot be opened")
else:
- print("PRIMITIVE open failed, cannot be opened")
+ print("PRIMITIVE open failed, too far")
elif action_primitive == ActionPrimitives.CLOSE:
- self.navigate_if_needed(obj)
-
- if hasattr(obj, "states") and object_states.Open in obj.states:
- obj.states[object_states.Open].set_value(False)
+ if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ if hasattr(obj, "states") and object_states.Open in obj.states:
+ action_exec_status = True
+ obj.states[object_states.Open].set_value(False)
+ print("PRIMITIVE close succeeded")
+ else:
+                    print("PRIMITIVE close failed, cannot be closed")
else:
- print("PRIMITIVE close failed, cannot be opened")
+ print("PRIMITIVE close failed, too far")
+
+ else:
+            print("Attempted to execute a high-level action whose target is the robot's own body; pass an action that targets a scene object instead.")
state, reward, done, info = super(BehaviorMPEnv, self).step(np.zeros(17))
print("PRIMITIVE satisfied predicates:", info["satisfied_predicates"])
- return state, reward, done, info
+ return state, reward, done, info, action_exec_status
- def grasp_obj(self, obj, hand):
- if self.use_motion_planning:
+ def grasp_obj(self, obj, use_motion_planning=False):
+ if use_motion_planning:
x, y, _ = obj.get_position()
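+            # aim for the top face of the object's AABB; execute_grasp_plan then lowers the hand onto it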
z = obj.states[object_states.AABB].get_value()[1][2]
- hand_x, hand_y, hand_z = self.robots[0].links[hand].get_position()
+ hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position()
# add a little randomness to avoid getting stuck
x += np.random.uniform(-0.025, 0.025)
@@ -300,81 +488,84 @@ def grasp_obj(self, obj, hand):
hand_limits=((minx, miny, minz), (maxx, maxy, maxz)),
obstacles=self.get_body_ids(include_self=True),
)
- restoreState(state)
+ p.restoreState(state)
p.removeState(state)
if plan is not None:
- grasp_success = self.execute_grasp_plan(plan, obj, hand)
- print("grasp success", grasp_success)
- if not grasp_success:
- print("grasp failed")
- self.reset_and_release_hand(hand)
+ grasp_success = self.execute_grasp_plan(plan, obj)
+                logging.debug("grasp success: %s", grasp_success)
+ if grasp_success:
+ self.obj_in_hand = obj
+ else:
+ logging.debug("grasp failed")
+ self.reset_and_release_hand()
else:
- print("plan is None")
- self.reset_and_release_hand(hand)
- # reset hand
+ logging.debug("plan is None")
+ self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation())
+ self.reset_and_release_hand()
else:
- obj.set_position(np.array(self.robots[0].links[hand].get_position())) # - np.array([0,0,0.05])
- self.robots[0].links[hand].set_close_fraction(1)
- self.robots[0].links[hand].trigger_fraction = 1
+ self.obj_in_hand = obj
+ obj.set_position(np.array(self.robots[0].parts["right_hand"].get_position()))
+ self.robots[0].parts["right_hand"].set_close_fraction(1)
+ self.robots[0].parts["right_hand"].trigger_fraction = 1
p.stepSimulation()
- obj.set_position(np.array(self.robots[0].links[hand].get_position()))
- self.robots[0].links[hand].handle_assisted_grasping(
+ obj.set_position(np.array(self.robots[0].parts["right_hand"].get_position()))
+ self.robots[0].parts["right_hand"].handle_assisted_grasping(
np.zeros(
28,
),
- override_ag_data=(obj.get_body_id(), -1),
+ override_ag_data=(obj.body_id[0], -1),
)
- def execute_grasp_plan(self, plan, obj, hand):
+ def execute_grasp_plan(self, plan, obj):
for x, y, z, roll, pitch, yaw in plan:
- self.robots[0].links[hand].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
p.stepSimulation()
x, y, z, roll, pitch, yaw = plan[-1]
+ # lower the hand until it touches the object
for i in range(25):
- self.robots[0].links[hand].move([x, y, z - i * 0.005], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ self.robots[0].parts["right_hand"].move([x, y, z - i * 0.005], p.getQuaternionFromEuler([roll, pitch, yaw]))
p.stepSimulation()
+ # close the hand
for _ in range(50):
- self.robots[0].links[hand].set_close_fraction(1)
- self.robots[0].links[hand].trigger_fraction = 1
+ self.robots[0].parts["right_hand"].set_close_fraction(1)
+ self.robots[0].parts["right_hand"].trigger_fraction = 1
p.stepSimulation()
grasp_success = (
self.robots[0]
- .links[hand]
+ .parts["right_hand"]
.handle_assisted_grasping(
np.zeros(
28,
),
- override_ag_data=(obj.get_body_id(), -1),
+ override_ag_data=(obj.body_id[0], -1),
)
)
+ # reverse the plan and get object close to torso
for x, y, z, roll, pitch, yaw in plan[::-1]:
- self.robots[0].links[hand].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
p.stepSimulation()
return grasp_success
- def place_obj(self, original_state, target_pos, target_orn, hand):
- obj_in_hand_id = self.robots[0].links[hand].object_in_hand
- obj_in_hand = self.scene.objects_by_id[obj_in_hand_id]
-
- pos = obj_in_hand.get_position()
- restoreState(original_state)
+ def place_obj(self, original_state, target_pos, target_orn, use_motion_planning=False):
+ pos = self.obj_in_hand.get_position()
+ p.restoreState(original_state)
p.removeState(original_state)
- if not self.use_motion_planning:
- self.reset_and_release_hand(hand)
-
- self.robots[0].links[hand].force_release_obj()
- obj_in_hand.set_position_orientation(target_pos, target_orn)
+ if not use_motion_planning:
+ self.reset_and_release_hand()
+ self.robots[0].parts["right_hand"].force_release_obj()
+ self.obj_in_hand.set_position_orientation(target_pos, target_orn)
+ self.obj_in_hand = None
else:
x, y, z = target_pos
- hand_x, hand_y, hand_z = self.robots[0].links[hand].get_position()
+ hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position()
minx = min(x, hand_x) - 1
miny = min(y, hand_y) - 1
@@ -385,30 +576,34 @@ def place_obj(self, original_state, target_pos, target_orn, hand):
state = p.saveState()
obstacles = self.get_body_ids()
- obstacles.remove(self.obj_in_hand.get_body_id())
+ obstacles.remove(self.obj_in_hand.body_id[0])
plan = plan_hand_motion_br(
robot=self.robots[0],
- obj_in_hand=obj_in_hand,
+ obj_in_hand=self.obj_in_hand,
end_conf=[x, y, z + 0.1, 0, np.pi * 5 / 6.0, 0],
hand_limits=((minx, miny, minz), (maxx, maxy, maxz)),
obstacles=obstacles,
) #
- restoreState(state)
+ p.restoreState(state)
p.removeState(state)
if plan:
for x, y, z, roll, pitch, yaw in plan:
- self.robots[0].links[hand].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
p.stepSimulation()
+ released_obj = self.obj_in_hand
+ self.obj_in_hand = None
- self.reset_and_release_hand(hand)
+ # release hand
+ self.reset_and_release_hand()
# force release object to avoid dealing with stateful AG release mechanism
- self.robots[0].links[hand].force_release_obj()
+ self.robots[0].parts["right_hand"].force_release_obj()
self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation())
+ # reset hand
# reset the released object to zero velocity
- p.resetBaseVelocity(obj_in_hand.get_body_id(), linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0])
+ p.resetBaseVelocity(released_obj.get_body_id(), linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0])
# let object fall
for _ in range(100):
@@ -420,22 +615,27 @@ def sample_fn(self):
theta = np.random.uniform(*CIRCULAR_LIMITS)
return (x, y, theta)
- def navigate_to_obj(self, obj):
+ def navigate_to_obj(self, obj, use_motion_planning=False):
# test agent positions around an obj
# try to place the agent near the object, and rotate it to the object
valid_position = None # ((x,y,z),(roll, pitch, yaw))
original_position = self.robots[0].get_position()
original_orientation = self.robots[0].get_orientation()
+
if isinstance(obj, URDFObject):
distance_to_try = [0.6, 1.2, 1.8, 2.4]
obj_pos = obj.get_position()
for distance in distance_to_try:
- for _ in range(20):
+ for _ in range(40):
yaw = np.random.uniform(-np.pi, np.pi)
- pos = [obj_pos[0] + distance * np.cos(yaw), obj_pos[1] + distance * np.sin(yaw), 0.7]
+ pos = [
+ obj_pos[0] + distance * np.cos(yaw),
+ obj_pos[1] + distance * np.sin(yaw),
+ self.robots[0].initial_z_offset,
+ ]
orn = [0, 0, yaw - np.pi]
self.robots[0].set_position_orientation(pos, p.getQuaternionFromEuler(orn))
- eye_pos = self.robots[0].links["eye"].get_position()
+ eye_pos = self.robots[0].parts["eye"].get_position()
obj_pos = obj.get_position()
ray_test_res = p.rayTest(eye_pos, obj_pos)
blocked = False
@@ -448,7 +648,7 @@ def navigate_to_obj(self, obj):
if valid_position is not None:
break
else:
- for _ in range(60):
+ for _ in range(120):
_, pos = obj.scene.get_random_point_by_room_instance(obj.room_instance)
yaw = np.random.uniform(-np.pi, np.pi)
orn = [0, 0, yaw]
@@ -458,21 +658,12 @@ def navigate_to_obj(self, obj):
break
if valid_position is not None:
- target_x = valid_position[0][0]
- target_y = valid_position[0][1]
- x = original_position[0]
- y = original_position[1]
- minx = min(x, target_x) - 1
- miny = min(y, target_y) - 1
- maxx = max(x, target_x) + 1
- maxy = max(y, target_y) + 1
-
- if self.use_motion_planning:
+ if use_motion_planning:
self.robots[0].set_position_orientation(original_position, original_orientation)
plan = plan_base_motion_br(
robot=self.robots[0],
end_conf=[valid_position[0][0], valid_position[0][1], valid_position[1][2]],
- base_limits=[(minx, miny), (maxx, maxy)],
+ base_limits=(),
obstacles=self.get_body_ids(),
override_sample_fn=self.sample_fn,
)
@@ -481,7 +672,6 @@ def navigate_to_obj(self, obj):
if self.mode != "headless":
dry_run_base_plan(robot=self.robots[0], plan=plan)
else:
- # TODO: Still execute the plan in headless mode instead of just teleporting.
self.robots[0].set_position_orientation(
valid_position[0], p.getQuaternionFromEuler(valid_position[1])
)
@@ -496,20 +686,12 @@ def navigate_to_obj(self, obj):
self.robots[0].set_position_orientation(original_position, original_orientation)
return False
- def navigate_if_needed(self, obj):
- if obj.states[object_states.InReachOfRobot].get_value():
- return
-
- for _ in range(10):
- if self.navigate_to_obj(obj):
- return
-
def reset(self, resample_objects=False):
+        # NOTE: resample_objects used to be forwarded via super(BehaviorMPEnv, self, resample_objects=resample_objects).reset(),
+        # but super() accepts at most two arguments, so the kwarg is dropped here.
obs = super(BehaviorMPEnv, self).reset()
- for hand in ["left_hand", "right_hand"]:
- self.robots[0].links[hand].set_close_fraction(0)
- self.robots[0].links[hand].trigger_fraction = 0
- self.robots[0].links[hand].force_release_obj()
+ self.obj_in_hand = None
+
return obs
@@ -524,9 +706,9 @@ def reset(self, resample_objects=False):
parser.add_argument(
"--mode",
"-m",
- choices=["headless", "headless_tensor", "gui_interactive", "gui_non_interactive"],
- default="gui_interactive",
- help="which mode for simulation (default: gui_interactive)",
+ choices=["headless", "gui", "iggui", "pbgui"],
+ default="gui",
+        help="which mode for simulation (default: gui)",
)
args = parser.parse_args()
@@ -542,7 +724,6 @@ def reset(self, resample_objects=False):
print("Episode: {}".format(episode))
start = time.time()
env.reset()
-
for i in range(1000): # 10 seconds
action = env.action_space.sample()
state, reward, done, info = env.step(action)
diff --git a/igibson/envs/behavior_tamp_env.py b/igibson/envs/behavior_tamp_env.py
new file mode 100644
index 000000000..9230488d0
--- /dev/null
+++ b/igibson/envs/behavior_tamp_env.py
@@ -0,0 +1,851 @@
+import argparse
+import logging
+import time
+from enum import IntEnum
+
+import gym.spaces
+import numpy as np
+import pybullet as p
+import scipy.spatial.transform
+
+from igibson import object_states
+from igibson.envs.behavior_env import BehaviorEnv
+from igibson.external.pybullet_tools.utils import CIRCULAR_LIMITS, get_base_difference_fn
+from igibson.object_states.on_floor import RoomFloor
+from igibson.object_states.utils import sample_kinematics, continuous_param_kinematics
+from igibson.objects.articulated_object import URDFObject
+from igibson.robots.behavior_robot import BRBody, BREye, BRHand
+from igibson.utils.behavior_robot_planning_utils import dry_run_base_plan, plan_base_motion_br, plan_hand_motion_br
+# from igibson.external.pybullet_tools.utils import (
+# CIRCULAR_LIMITS,
+# MAX_DISTANCE,
+# PI,
+# birrt,
+# circular_difference,
+# direct_path,
+# get_base_difference_fn,
+# get_base_distance_fn,
+# pairwise_collision,
+# )
+
+NUM_ACTIONS = 6
+MAX_ACTION_CONT_PARAMS = 7
+
+
+class ActionPrimitives(IntEnum):
+ NAVIGATE_TO = 0
+ GRASP = 1
+ PLACE_ONTOP = 2
+ PLACE_INSIDE = 3
+ OPEN = 4
+ CLOSE = 5
+
+
+def get_aabb_volume(lo, hi):
+ dimension = hi - lo
+ return dimension[0] * dimension[1] * dimension[2]
+
+
+def detect_collision(bodyA, object_in_hand=None):
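+    # Returns True if bodyA is within 1 cm of any other body, ignoring itself and the object currently in hand.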
+ collision = False
+ for body_id in range(p.getNumBodies()):
+ if body_id == bodyA or body_id == object_in_hand:
+ continue
+ closest_points = p.getClosestPoints(bodyA, body_id, distance=0.01)
+ if len(closest_points) > 0:
+ collision = True
+ break
+ return collision
+
+
+def detect_robot_collision(robot):
+ object_in_hand = robot.parts["right_hand"].object_in_hand
+ return (
+ detect_collision(robot.parts["body"].body_id)
+ or detect_collision(robot.parts["left_hand"].body_id)
+ or detect_collision(robot.parts["right_hand"].body_id, object_in_hand)
+ )
+
+
+class BehaviorTAMPEnv(BehaviorEnv):
+ """
+ iGibson Environment (OpenAI Gym interface).
+ Similar to BehaviorMPEnv, except (1) all high-level
+ actions are parameterized to make them deterministic
+ and thus conducive to sample-based TAMP and (2) there are
+ more such high-level actions.
+ """
+
+ def __init__(
+ self,
+ config_file,
+ scene_id=None,
+ mode="headless",
+ action_timestep=1 / 10.0,
+ physics_timestep=1 / 240.0,
+ device_idx=0,
+ render_to_tensor=False,
+ automatic_reset=False,
+ seed=0,
+ action_filter="mobile_manipulation",
+ use_motion_planning=False,
+ ):
+ """
+ :param config_file: config_file path
+ :param scene_id: override scene_id in config file
+ :param mode: headless, gui, iggui
+ :param action_timestep: environment executes action per action_timestep second
+ :param physics_timestep: physics timestep for pybullet
+ :param device_idx: which GPU to run the simulation and rendering on
+ :param render_to_tensor: whether to render directly to pytorch tensors
+        :param automatic_reset: whether to automatically reset after an episode finishes
+        :param use_motion_planning: whether to execute primitives with motion planning (otherwise the robot and objects are teleported to sampled poses)
+        """
+ super(BehaviorTAMPEnv, self).__init__(
+ config_file=config_file,
+ scene_id=scene_id,
+ mode=mode,
+ action_timestep=action_timestep,
+ physics_timestep=physics_timestep,
+ device_idx=device_idx,
+ render_to_tensor=render_to_tensor,
+ action_filter=action_filter,
+ seed=seed,
+ automatic_reset=automatic_reset,
+ )
+
+ self.obj_in_hand = None
+ self.use_motion_planning = use_motion_planning
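+        # Assumed nominal standing height of the BehaviorRobot body; reused as the base z when
+        # sampling candidate base poses in navigate_to_obj_pos.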
+ self.robots[0].initial_z_offset = 0.7
+
+ def load_action_space(self):
+ self.task_relevant_objects = [
+ item
+ for item in self.task.object_scope.values()
+ if isinstance(item, URDFObject) or isinstance(item, RoomFloor)
+ ]
+ self.num_objects = len(self.task_relevant_objects)
+ self.action_space = gym.spaces.Discrete(self.num_objects * NUM_ACTIONS)
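+        # Flat encoding: action = primitive * num_objects + object_index, so with e.g. 10
+        # task-relevant objects, PLACE_ONTOP (2) of object 3 is action 2 * 10 + 3 == 23.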
+
+ def get_body_ids(self, include_self=False):
+ ids = []
+        for scene_obj in self.scene.get_objects():
+            if isinstance(scene_obj, URDFObject):
+                ids.extend(scene_obj.body_ids)
+
+ if include_self:
+ ids.append(self.robots[0].parts["left_hand"].get_body_id())
+ ids.append(self.robots[0].parts["body"].get_body_id())
+
+ return ids
+
+ def reset_and_release_hand(self):
+ self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation())
+ for _ in range(100):
+ self.robots[0].parts["right_hand"].set_close_fraction(0)
+ self.robots[0].parts["right_hand"].trigger_fraction = 0
+ p.stepSimulation()
+
+ # def step(self, action, continuous_params):
+ # """
+ # :param action: an integer such that action % self.num_objects yields the object id, and
+ # action // self.num_objects yields the correct action enum
+ # :param continuous_params: a numpy vector of length MAX_ACTION_CONT_PARAMS. This represents
+ # values derived from the continuous parameters of the action.
+ # """
+
+ # obj_list_id = int(action) % self.num_objects
+ # action_primitive = int(action) // self.num_objects
+ # obj = self.task_relevant_objects[obj_list_id]
+
+ # assert continuous_params.shape == (MAX_ACTION_CONT_PARAMS,)
+
+ # if not (isinstance(obj, BRBody) or isinstance(obj, BRHand) or isinstance(obj, BREye)):
+ # if action_primitive == ActionPrimitives.NAVIGATE_TO:
+ # if self.navigate_to_obj_pos(obj, continuous_params[0:2], use_motion_planning=self.use_motion_planning):
+ # logging.debug("PRIMITIVE: navigate to {} success".format(obj.name))
+ # else:
+ # logging.debug("PRIMITIVE: navigate to {} fail".format(obj.name))
+
+ # elif action_primitive == ActionPrimitives.GRASP:
+ # if self.obj_in_hand is None:
+ # if isinstance(obj, URDFObject) and hasattr(obj, "states") and object_states.AABB in obj.states:
+ # lo, hi = obj.states[object_states.AABB].get_value()
+ # volume = get_aabb_volume(lo, hi)
+ # if (
+ # volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed
+ # ): # say we can only grasp small objects
+ # if (
+ # np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position()))
+ # < 2
+ # ):
+ # self.grasp_obj(obj, use_motion_planning=self.use_motion_planning)
+ # logging.debug(
+ # "PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, self.obj_in_hand)
+ # )
+ # else:
+ # logging.debug("PRIMITIVE: grasp {} fail, too far".format(obj.name))
+ # else:
+ # logging.debug("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name))
+
+ # elif action_primitive == ActionPrimitives.PLACE_ONTOP:
+ # if self.obj_in_hand is not None and self.obj_in_hand != obj:
+ # logging.debug("PRIMITIVE:attempt to place {} ontop {}".format(self.obj_in_hand.name, obj.name))
+
+ # if isinstance(obj, URDFObject):
+ # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ # state = p.saveState()
+ # result = sample_kinematics(
+ # "onTop",
+ # self.obj_in_hand,
+ # obj,
+ # True,
+ # use_ray_casting_method=True,
+ # max_trials=20,
+ # skip_falling=True,
+ # )
+ # if result:
+ # logging.debug(
+ # "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name)
+ # )
+ # pos = self.obj_in_hand.get_position()
+ # orn = self.obj_in_hand.get_orientation()
+ # self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ # else:
+ # p.removeState(state)
+ # logging.debug(
+ # "PRIMITIVE: place {} ontop {} fail, sampling fail".format(
+ # self.obj_in_hand.name, obj.name
+ # )
+ # )
+
+ # else:
+ # logging.debug(
+ # "PRIMITIVE: place {} ontop {} fail, too far".format(self.obj_in_hand.name, obj.name)
+ # )
+ # else:
+ # state = p.saveState()
+ # result = sample_kinematics(
+ # "onFloor",
+ # self.obj_in_hand,
+ # obj,
+ # True,
+ # use_ray_casting_method=True,
+ # max_trials=20,
+ # skip_falling=True,
+ # )
+ # if result:
+ # logging.debug(
+ # "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name)
+ # )
+ # pos = self.obj_in_hand.get_position()
+ # orn = self.obj_in_hand.get_orientation()
+ # self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ # else:
+ # logging.debug(
+ # "PRIMITIVE: place {} ontop {} fail, sampling fail".format(
+ # self.obj_in_hand.name, obj.name
+ # )
+ # )
+ # p.removeState(state)
+
+ # elif action_primitive == ActionPrimitives.PLACE_INSIDE:
+ # if self.obj_in_hand is not None and self.obj_in_hand != obj and isinstance(obj, URDFObject):
+ # logging.debug("PRIMITIVE:attempt to place {} inside {}".format(self.obj_in_hand.name, obj.name))
+ # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ # if (
+ # hasattr(obj, "states")
+ # and object_states.Open in obj.states
+ # and obj.states[object_states.Open].get_value()
+ # ) or (hasattr(obj, "states") and not object_states.Open in obj.states):
+ # state = p.saveState()
+ # result = sample_kinematics(
+ # "inside",
+ # self.obj_in_hand,
+ # obj,
+ # True,
+ # use_ray_casting_method=True,
+ # max_trials=20,
+ # skip_falling=True,
+ # )
+ # if result:
+ # logging.debug(
+ # "PRIMITIVE: place {} inside {} success".format(self.obj_in_hand.name, obj.name)
+ # )
+ # pos = self.obj_in_hand.get_position()
+ # orn = self.obj_in_hand.get_orientation()
+ # self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ # else:
+ # logging.debug(
+ # "PRIMITIVE: place {} inside {} fail, sampling fail".format(
+ # self.obj_in_hand.name, obj.name
+ # )
+ # )
+ # p.removeState(state)
+ # else:
+ # logging.debug(
+ # "PRIMITIVE: place {} inside {} fail, need open not open".format(
+ # self.obj_in_hand.name, obj.name
+ # )
+ # )
+ # else:
+ # logging.debug(
+ # "PRIMITIVE: place {} inside {} fail, too far".format(self.obj_in_hand.name, obj.name)
+ # )
+ # elif action_primitive == ActionPrimitives.OPEN:
+ # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ # if hasattr(obj, "states") and object_states.Open in obj.states:
+ # obj.states[object_states.Open].set_value(True)
+ # else:
+ # logging.debug("PRIMITIVE open failed, cannot be opened")
+ # else:
+ # logging.debug("PRIMITIVE open failed, too far")
+
+ # elif action_primitive == ActionPrimitives.CLOSE:
+ # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ # if hasattr(obj, "states") and object_states.Open in obj.states:
+ # obj.states[object_states.Open].set_value(False)
+ # else:
+ # logging.debug("PRIMITIVE close failed, cannot be opened")
+ # else:
+ # logging.debug("PRIMITIVE close failed, too far")
+
+ # state, reward, done, info = super(BehaviorTAMPEnv, self).step(np.zeros(17))
+ # logging.debug("PRIMITIVE satisfied predicates:", info["satisfied_predicates"])
+ # return state, reward, done, info
+
+ def step_with_exec_info(self, action, continuous_params):
+ """
+ Same as the above step method, but returns an additional param (action_exec_status)
+ that is True if the high-level action execution succeeded and False otherwise
+ """
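+        # As sliced below: continuous_params[0:2] is the base (x, y) offset from the object for
+        # NAVIGATE_TO, [0:4] is the hand (x, y, z) offset plus a z-rotation for GRASP, and [0:7] is
+        # a full placement pose for PLACE_ONTOP; the remaining entries are currently unused.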
+ obj_list_id = int(action) % self.num_objects
+ action_primitive = int(action) // self.num_objects
+ action_exec_status = False
+ obj = self.task_relevant_objects[obj_list_id]
+
+ assert continuous_params.shape == (MAX_ACTION_CONT_PARAMS,)
+
+ if not (isinstance(obj, BRBody) or isinstance(obj, BRHand) or isinstance(obj, BREye)):
+ if action_primitive == ActionPrimitives.NAVIGATE_TO:
+                # navigate_to_obj_pos returns (per-step base actions, success flag); test the flag explicitly
+                if self.navigate_to_obj_pos(obj, continuous_params[0:2], use_motion_planning=self.use_motion_planning)[1]:
+ print("PRIMITIVE: navigate to {} success".format(obj.name))
+ action_exec_status = True
+ else:
+ print(f"PRIMITIVE: navigate to {obj.name} with params {continuous_params[0:2]} fail")
+
+ elif action_primitive == ActionPrimitives.GRASP:
+ if self.obj_in_hand is None:
+ if isinstance(obj, URDFObject) and hasattr(obj, "states") and object_states.AABB in obj.states:
+ lo, hi = obj.states[object_states.AABB].get_value()
+ volume = get_aabb_volume(lo, hi)
+ if (
+ volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed
+ ): # say we can only grasp small objects
+ if (
+ np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position()))
+ < 2
+ ):
+                                self.grasp_obj_at_pos(obj, continuous_params[0:4], use_motion_planning=self.use_motion_planning)
+ print(
+ "PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, self.obj_in_hand)
+ )
+ action_exec_status = True
+ else:
+ print("PRIMITIVE: grasp {} fail, too far".format(obj.name))
+ else:
+ print("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name))
+ else:
+ print("PRIMITIVE: grasp {} fail, agent already has an object in hand!".format(obj.name))
+
+ elif action_primitive == ActionPrimitives.PLACE_ONTOP:
+ if self.obj_in_hand is not None and self.obj_in_hand != obj:
+                    print("PRIMITIVE: attempt to place {} ontop {}".format(self.obj_in_hand.name, obj.name))
+
+ if isinstance(obj, URDFObject):
+ if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ state = p.saveState()
+ result = continuous_param_kinematics(
+ "onTop",
+ self.obj_in_hand,
+ obj,
+ True,
+ continuous_params[0:7],
+ use_ray_casting_method=True,
+ max_trials=10,
+ skip_falling=True,
+ )
+ if result:
+ print(
+ "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name)
+ )
+ action_exec_status = True
+ pos = self.obj_in_hand.get_position()
+ orn = self.obj_in_hand.get_orientation()
+ self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ else:
+ p.removeState(state)
+ print(
+ "PRIMITIVE: place {} ontop {} fail, sampling fail".format(
+ self.obj_in_hand.name, obj.name
+ )
+ )
+
+ else:
+ print(
+ "PRIMITIVE: place {} ontop {} fail, too far".format(self.obj_in_hand.name, obj.name)
+ )
+ else:
+ state = p.saveState()
+ result = continuous_param_kinematics(
+ "onFloor",
+ self.obj_in_hand,
+ obj,
+ True,
+ continuous_params[0:7],
+ use_ray_casting_method=True,
+ max_trials=10,
+ skip_falling=True,
+ )
+ if result:
+ print(
+ "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name)
+ )
+ action_exec_status = True
+ pos = self.obj_in_hand.get_position()
+ orn = self.obj_in_hand.get_orientation()
+ self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ else:
+ print(
+ "PRIMITIVE: place {} ontop {} fail, sampling fail".format(
+ self.obj_in_hand.name, obj.name
+ )
+ )
+ p.removeState(state)
+
+ # elif action_primitive == ActionPrimitives.PLACE_INSIDE:
+ # if self.obj_in_hand is not None and self.obj_in_hand != obj and isinstance(obj, URDFObject):
+ # print("PRIMITIVE:attempt to place {} inside {}".format(self.obj_in_hand.name, obj.name))
+ # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ # if (
+ # hasattr(obj, "states")
+ # and object_states.Open in obj.states
+ # and obj.states[object_states.Open].get_value()
+ # ) or (hasattr(obj, "states") and not object_states.Open in obj.states):
+ # state = p.saveState()
+ # result = sample_kinematics(
+ # "inside",
+ # self.obj_in_hand,
+ # obj,
+ # True,
+ # use_ray_casting_method=True,
+ # max_trials=20,
+ # skip_falling=True,
+ # )
+ # if result:
+ # print(
+ # "PRIMITIVE: place {} inside {} success".format(self.obj_in_hand.name, obj.name)
+ # )
+ # action_exec_status = True
+ # pos = self.obj_in_hand.get_position()
+ # orn = self.obj_in_hand.get_orientation()
+ # self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning)
+ # else:
+ # print(
+ # "PRIMITIVE: place {} inside {} fail, sampling fail".format(
+ # self.obj_in_hand.name, obj.name
+ # )
+ # )
+ # p.removeState(state)
+ # else:
+ # print(
+ # "PRIMITIVE: place {} inside {} fail, need open not open".format(
+ # self.obj_in_hand.name, obj.name
+ # )
+ # )
+ # else:
+ # print(
+ # "PRIMITIVE: place {} inside {} fail, too far".format(self.obj_in_hand.name, obj.name)
+ # )
+ # elif action_primitive == ActionPrimitives.OPEN:
+ # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ # if hasattr(obj, "states") and object_states.Open in obj.states:
+ # action_exec_status = True
+ # print("PRIMITIVE open succeeded")
+ # obj.states[object_states.Open].set_value(True)
+ # else:
+ # print("PRIMITIVE open failed, cannot be opened")
+ # else:
+ # print("PRIMITIVE open failed, too far")
+
+ # elif action_primitive == ActionPrimitives.CLOSE:
+ # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2:
+ # if hasattr(obj, "states") and object_states.Open in obj.states:
+ # action_exec_status = True
+ # obj.states[object_states.Open].set_value(False)
+ # print("PRIMITIVE close succeeded")
+ # else:
+ # print("PRIMITIVE close failed, cannot be opened")
+ # else:
+ # print("PRIMITIVE close failed, too far")
+
+ else:
+            print("Attempted to execute a high-level action whose target is the robot's own body; pass an action that targets a scene object instead.")
+
+ state, reward, done, info = super(BehaviorTAMPEnv, self).step(np.zeros(17))
+ print("PRIMITIVE satisfied predicates:", info["satisfied_predicates"])
+ return state, reward, done, info, action_exec_status
+
+ def grasp_obj_at_pos(self, obj, grasp_offset_and_z_rot, use_motion_planning=False):
+ if use_motion_planning:
+ x, y, z = obj.get_position()
+ x += grasp_offset_and_z_rot[0]
+ y += grasp_offset_and_z_rot[1]
+ z += grasp_offset_and_z_rot[2]
+ hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position()
+
+ # # add a little randomness to avoid getting stuck
+ # x += np.random.uniform(-0.025, 0.025)
+ # y += np.random.uniform(-0.025, 0.025)
+ # z += np.random.uniform(-0.025, 0.025)
+
+ minx = min(x, hand_x) - 0.5
+ miny = min(y, hand_y) - 0.5
+ minz = min(z, hand_z) - 0.5
+ maxx = max(x, hand_x) + 0.5
+ maxy = max(y, hand_y) + 0.5
+ maxz = max(z, hand_z) + 0.5
+
+ # compute the angle the hand must be in such that it can grasp the object from its current offset position
+ # This involves aligning the z-axis (in the world frame) of the hand with the vector that goes from the hand
+ # to the object. We can find the rotation matrix that accomplishes this rotation by following:
+ # https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d
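+            # With v = z x u, s = ||v|| and c = z . u (u being the unit hand-to-object vector), the
+            # alignment rotation is R = I + [v]_x + [v]_x^2 * (1 - c) / s^2, which the block below evaluates.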
+ hand_to_obj_vector = np.array([x - hand_x, y - hand_y, z - hand_z])
+ hand_to_obj_unit_vector = hand_to_obj_vector / np.linalg.norm(hand_to_obj_vector)
+            unit_z_vector = np.array([0.0, 0.0, 1.0])
+            c = np.dot(unit_z_vector, hand_to_obj_unit_vector)
+            if not np.isclose(c, -1.0):
+ v = np.cross(unit_z_vector, hand_to_obj_unit_vector)
+ s = np.linalg.norm(v)
+ v_x = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
+ R = np.eye(3) + v_x + np.linalg.matrix_power(v_x, 2) * ((1-c)/(s ** 2))
+ r = scipy.spatial.transform.Rotation.from_matrix(R)
+ euler_angles = r.as_euler('xyz')
+ euler_angles[2] += grasp_offset_and_z_rot[3]
+ else:
+ euler_angles = np.array([0.0, np.pi, 0.0])
+
+ state = p.saveState()
+ # plan a motion to above the object
+ plan = plan_hand_motion_br(
+ robot=self.robots[0],
+ obj_in_hand=None,
+ end_conf=[x, y, z, euler_angles[0], euler_angles[1], euler_angles[2]],
+ hand_limits=((minx, miny, minz), (maxx, maxy, maxz)),
+ obstacles=self.get_body_ids(include_self=True),
+ )
+ p.restoreState(state)
+ p.removeState(state)
+
+ if plan is not None:
+ grasp_success = self.execute_grasp_plan(plan, obj)
+                logging.debug("grasp success: %s", grasp_success)
+ if grasp_success:
+ self.obj_in_hand = obj
+ else:
+ logging.debug("grasp failed")
+ self.reset_and_release_hand()
+ else:
+ logging.debug("plan is None")
+ self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation())
+ self.reset_and_release_hand()
+ else:
+ self.obj_in_hand = obj
+ obj.set_position(np.array(self.robots[0].parts["right_hand"].get_position()))
+ self.robots[0].parts["right_hand"].set_close_fraction(1)
+ self.robots[0].parts["right_hand"].trigger_fraction = 1
+ p.stepSimulation()
+ obj.set_position(np.array(self.robots[0].parts["right_hand"].get_position()))
+ self.robots[0].parts["right_hand"].handle_assisted_grasping(
+ np.zeros(
+ 28,
+ ),
+ override_ag_data=(obj.body_id[0], -1),
+ )
+
+ def execute_grasp_plan(self, plan, obj):
+ for x, y, z, roll, pitch, yaw in plan:
+ self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ p.stepSimulation()
+ # s, a = get_last_action(p)
+
+ x, y, z, roll, pitch, yaw = plan[-1]
+
+ # lower the hand until it touches the object
+ for i in range(25):
+ self.robots[0].parts["right_hand"].move([x, y, z - i * 0.005], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ p.stepSimulation()
+
+ # close the hand
+ for _ in range(50):
+ self.robots[0].parts["right_hand"].set_close_fraction(1)
+ self.robots[0].parts["right_hand"].trigger_fraction = 1
+ p.stepSimulation()
+
+ grasp_success = (
+ self.robots[0]
+ .parts["right_hand"]
+ .handle_assisted_grasping(
+ np.zeros(
+ 28,
+ ),
+ override_ag_data=(obj.body_id[0], -1),
+ )
+ )
+
+ # reverse the plan and get object close to torso
+ for x, y, z, roll, pitch, yaw in plan[::-1]:
+ self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ p.stepSimulation()
+
+ return grasp_success
+
+ def place_obj(self, original_state, target_pos, target_orn, use_motion_planning=False):
+ pos = self.obj_in_hand.get_position()
+ p.restoreState(original_state)
+ p.removeState(original_state)
+ if not use_motion_planning:
+ self.reset_and_release_hand()
+ self.robots[0].parts["right_hand"].force_release_obj()
+ self.obj_in_hand.set_position_orientation(target_pos, target_orn)
+ self.obj_in_hand = None
+
+ else:
+ x, y, z = target_pos
+ hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position()
+
+ minx = min(x, hand_x) - 1
+ miny = min(y, hand_y) - 1
+ minz = min(z, hand_z) - 0.5
+ maxx = max(x, hand_x) + 1
+ maxy = max(y, hand_y) + 1
+ maxz = max(z, hand_z) + 0.5
+
+ state = p.saveState()
+ obstacles = self.get_body_ids()
+ obstacles.remove(self.obj_in_hand.body_id[0])
+ plan = plan_hand_motion_br(
+ robot=self.robots[0],
+ obj_in_hand=self.obj_in_hand,
+ end_conf=[x, y, z + 0.1, 0, np.pi * 5 / 6.0, 0],
+ hand_limits=((minx, miny, minz), (maxx, maxy, maxz)),
+ obstacles=obstacles,
+ ) #
+ p.restoreState(state)
+ p.removeState(state)
+
+ if plan:
+ for x, y, z, roll, pitch, yaw in plan:
+ self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ p.stepSimulation()
+ released_obj = self.obj_in_hand
+ self.obj_in_hand = None
+
+ # release hand
+ self.reset_and_release_hand()
+
+ # force release object to avoid dealing with stateful AG release mechanism
+ self.robots[0].parts["right_hand"].force_release_obj()
+ self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation())
+ # reset hand
+
+ # reset the released object to zero velocity
+ p.resetBaseVelocity(released_obj.get_body_id(), linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0])
+
+ # let object fall
+ for _ in range(100):
+ p.stepSimulation()
+
+ def place_obj_plan(self, original_state, target_pos, target_orn, use_motion_planning=False):
+ pos = self.obj_in_hand.get_position()
+ p.restoreState(original_state)
+ p.removeState(original_state)
+ if not use_motion_planning:
+ self.reset_and_release_hand()
+ self.robots[0].parts["right_hand"].force_release_obj()
+ self.obj_in_hand.set_position_orientation(target_pos, target_orn)
+ self.obj_in_hand = None
+
+ else:
+ x, y, z = target_pos
+ hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position()
+
+ minx = min(x, hand_x) - 1
+ miny = min(y, hand_y) - 1
+ minz = min(z, hand_z) - 0.5
+ maxx = max(x, hand_x) + 1
+ maxy = max(y, hand_y) + 1
+ maxz = max(z, hand_z) + 0.5
+
+ state = p.saveState()
+ obstacles = self.get_body_ids()
+ obstacles.remove(self.obj_in_hand.body_id[0])
+ plan = plan_hand_motion_br(
+ robot=self.robots[0],
+ obj_in_hand=self.obj_in_hand,
+ end_conf=[x, y, z + 0.1, 0, np.pi * 5 / 6.0, 0],
+ hand_limits=((minx, miny, minz), (maxx, maxy, maxz)),
+ obstacles=obstacles,
+ ) #
+ p.restoreState(state)
+ p.removeState(state)
+
+ if plan:
+ for x, y, z, roll, pitch, yaw in plan:
+ self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ p.stepSimulation()
+ released_obj = self.obj_in_hand
+ self.obj_in_hand = None
+
+ # release hand
+ self.reset_and_release_hand()
+
+ # force release object to avoid dealing with stateful AG release mechanism
+ self.robots[0].parts["right_hand"].force_release_obj()
+ self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation())
+ # reset hand
+
+ # reset the released object to zero velocity
+ p.resetBaseVelocity(released_obj.get_body_id(), linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0])
+
+ # let object fall
+ for _ in range(100):
+ p.stepSimulation()
+
+ return plan
+
+ # def convert_nav_action_to_17dim_action(nav_action):
+ # """
+ # :param nav_action: a 3-dim numpy array that represents the x-position, y-position and
+ # z-rotation of the robot in the world frame
+ # """
+ # curr_body_pos = self.robots[0].get_position()
+ # curr_body_eul = p.getEulerFromQuaternion(self.robots[0].parts["right_hand"].get_orientation())
+ # curr_right_pos = self.robots[0].parts["right_hand"].get_position()
+ # curr_right_eul = p.getEulerFromQuaternion(self.robots[0].parts["right_hand"].get_orientation())
+
+ # curr_body_pos_arr = np.zeros(3)
+ # curr_body_pos_arr[0:2] = np.array(curr_body_pos)
+ # new_body_pos_arr = np.zeros(3)
+ # curr_body_pos_arr[0:2] = np.array(nav_action[0:2])
+ # new_body_pos_arr[2] = curr_body_pos_arr[2]
+ # right_pos_offset = curr_body_pos_arr - np.array(curr_right_pos)
+ # new_right_pos = new_body_pos_arr - right_pos_offset
+
+
+
+ def sample_fn(self):
+ random_point = self.scene.get_random_point()
+ x, y = random_point[1][:2]
+ theta = np.random.uniform(*CIRCULAR_LIMITS)
+ return (x, y, theta)
+
+    def navigate_to_obj_pos(self, obj, pos_offset, use_motion_planning=False):
+        """
+        Navigates the robot to an (x, y) position near the object and selects an orientation
+        such that the robot faces the object. Returns the per-step base actions of the planned
+        motion and a success flag; if the commanded pose is infeasible, returns (np.zeros(17), False).
+
+        :param obj: the object to navigate toward
+        :param pos_offset: a length-2 numpy array (x, y) giving the desired base position as an
+            offset from the object's position, in the world frame
+        :param use_motion_planning: accepted for API consistency with the other primitives; a base
+            motion plan is always computed here
+        """
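+        # Example (illustrative): pos_offset = np.array([0.6, 0.0]) requests a base pose 0.6 m from
+        # the object along the world x-axis, turned to face the object.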
+
+ # test agent positions around an obj
+ # try to place the agent near the object, and rotate it to the object
+ valid_position = None # ((x,y,z),(roll, pitch, yaw))
+ original_position = self.robots[0].get_position()
+ original_orientation = self.robots[0].get_orientation()
+ base_diff_fn = get_base_difference_fn()
+
+ if isinstance(obj, URDFObject): # must be a URDFObject so we can get its position!
+ obj_pos = obj.get_position()
+ pos = [pos_offset[0] + obj_pos[0], pos_offset[1] + obj_pos[1], self.robots[0].initial_z_offset]
+            # rotate by pi so the robot faces the object rather than pointing away from it
+            yaw_angle = np.arctan2(pos_offset[1], pos_offset[0]) - np.pi
+ orn = [0, 0, yaw_angle]
+ self.robots[0].set_position_orientation(pos, p.getQuaternionFromEuler(orn))
+ eye_pos = self.robots[0].parts["eye"].get_position()
+ ray_test_res = p.rayTest(eye_pos, obj_pos)
+ # Test to see if the robot is obstructed by some object, but make sure that object
+ # is not either the robot's body or the object we want to pick up!
+ blocked = len(ray_test_res) > 0 and (ray_test_res[0][0] not in (self.robots[0].parts["body"].get_body_id(), obj.get_body_id()))
+ if not detect_robot_collision(self.robots[0]) and not blocked:
+ valid_position = (pos, orn)
+
+ if valid_position is not None:
+ self.robots[0].set_position_orientation(original_position, original_orientation)
+ plan = plan_base_motion_br(
+ robot=self.robots[0],
+ end_conf=[valid_position[0][0], valid_position[0][1], valid_position[1][2]],
+ base_limits=(),
+ obstacles=self.get_body_ids(),
+ override_sample_fn=self.sample_fn,
+ )
+
+            if plan is None:
+                print("PRIMITIVE: navigate to {} fail, no base motion plan found".format(obj.name))
+                return np.zeros(17), False
+
+            plan_num_steps = len(plan)
+            ret_actions = np.zeros((17, plan_num_steps - 1))
+            plan_arr = np.array(plan)
+
+            # column i - 1 holds the base displacement (dx, dy, dtheta) from waypoint i - 1 to waypoint i
+            for i in range(1, plan_num_steps):
+                ret_actions[0:3, i - 1] = base_diff_fn(plan_arr[i], plan_arr[i - 1])
+
+            return ret_actions, True
+
+ else:
+            print("PRIMITIVE: navigate to {} fail, commanded pose is in collision or the object is occluded".format(obj.name))
+ self.robots[0].set_position_orientation(original_position, original_orientation)
+ return np.zeros(17), False
+
+
+ def reset(self, resample_objects=False):
+ obs = super(BehaviorTAMPEnv, self).reset()
+ self.obj_in_hand = None
+
+ return obs
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--config",
+ "-c",
+ default="igibson/examples/configs/behavior.yaml",
+ help="which config file to use [default: use yaml files in examples/configs]",
+ )
+ parser.add_argument(
+ "--mode",
+ "-m",
+ choices=["headless", "gui", "iggui", "pbgui"],
+ default="gui",
+        help="which mode for simulation (default: gui)",
+ )
+ args = parser.parse_args()
+
+ env = BehaviorTAMPEnv(
+ config_file=args.config,
+ mode=args.mode,
+ action_timestep=1.0 / 300.0,
+ physics_timestep=1.0 / 300.0,
+ use_motion_planning=True,
+ )
+ step_time_list = []
+ for episode in range(100):
+ print("Episode: {}".format(episode))
+ start = time.time()
+ env.reset()
+ for i in range(1000): # 10 seconds
+ action = env.action_space.sample()
+ state, reward, done, info = env.step(action)
+ print(reward, info)
+ if done:
+ break
+ print("Episode finished after {} timesteps, took {} seconds.".format(env.current_step, time.time() - start))
+ env.close()
diff --git a/igibson/envs/env_base.py b/igibson/envs/env_base.py
index 03ad36092..8a89e41a7 100644
--- a/igibson/envs/env_base.py
+++ b/igibson/envs/env_base.py
@@ -1,21 +1,28 @@
import gym
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
-from igibson.robots import REGISTERED_ROBOTS
+from igibson.robots.ant_robot import Ant
+from igibson.robots.fetch_robot import Fetch
+from igibson.robots.freight_robot import Freight
+from igibson.robots.humanoid_robot import Humanoid
+from igibson.robots.husky_robot import Husky
+from igibson.robots.jr2_kinova_robot import JR2_Kinova
+from igibson.robots.jr2_robot import JR2
+from igibson.robots.locobot_robot import Locobot
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.scenes.empty_scene import EmptyScene
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.scenes.stadium_scene import StadiumScene
from igibson.simulator import Simulator
-from igibson.simulator_vr import SimulatorVR
from igibson.utils.utils import parse_config
class BaseEnv(gym.Env):
"""
- Base Env class that handles loading scene and robot, following OpenAI Gym interface.
- Functions like reset and step are not implemented.
+ Base Env class, follows OpenAI Gym interface
+ Handles loading scene and robot
+ Functions like reset and step are not implemented
"""
def __init__(
@@ -25,21 +32,16 @@ def __init__(
mode="headless",
action_timestep=1 / 10.0,
physics_timestep=1 / 240.0,
- rendering_settings=None,
- vr_settings=None,
+ render_to_tensor=False,
device_idx=0,
- use_pb_gui=False,
):
"""
:param config_file: config_file path
:param scene_id: override scene_id in config file
- :param mode: headless, headless_tensor, gui_interactive, gui_non_interactive, vr
+ :param mode: headless or gui mode
:param action_timestep: environment executes action per action_timestep second
:param physics_timestep: physics timestep for pybullet
- :param rendering_settings: rendering_settings to override the default one
- :param vr_settings: vr_settings to override the default one
:param device_idx: device_idx: which GPU to run the simulation and rendering on
- :param use_pb_gui: concurrently display the interactive pybullet gui (for debugging)
"""
self.config = parse_config(config_file)
if scene_id is not None:
@@ -48,60 +50,37 @@ def __init__(
self.mode = mode
self.action_timestep = action_timestep
self.physics_timestep = physics_timestep
- self.rendering_settings = rendering_settings
- self.vr_settings = vr_settings
self.texture_randomization_freq = self.config.get("texture_randomization_freq", None)
self.object_randomization_freq = self.config.get("object_randomization_freq", None)
self.object_randomization_idx = 0
self.num_object_randomization_idx = 10
- enable_shadow = self.config.get("enable_shadow", True)
+ enable_shadow = self.config.get("enable_shadow", False)
enable_pbr = self.config.get("enable_pbr", True)
texture_scale = self.config.get("texture_scale", 1.0)
- if self.rendering_settings is None:
- # TODO: We currently only support the optimized renderer due to some issues with obj highlighting.
- self.rendering_settings = MeshRendererSettings(
- enable_shadow=enable_shadow,
- enable_pbr=enable_pbr,
- msaa=False,
- texture_scale=texture_scale,
- optimized=self.config.get("optimized_renderer", True),
- load_textures=self.config.get("load_texture", True),
- )
-
- if mode == "vr":
- if self.vr_settings is None:
- self.vr_settings = VrSettings(use_vr=True)
- self.simulator = SimulatorVR(
- physics_timestep=physics_timestep,
- render_timestep=action_timestep,
- image_width=self.config.get("image_width", 128),
- image_height=self.config.get("image_height", 128),
- vertical_fov=self.config.get("vertical_fov", 90),
- device_idx=device_idx,
- rendering_settings=self.rendering_settings,
- vr_settings=self.vr_settings,
- use_pb_gui=use_pb_gui,
- )
- else:
- self.simulator = Simulator(
- mode=mode,
- physics_timestep=physics_timestep,
- render_timestep=action_timestep,
- image_width=self.config.get("image_width", 128),
- image_height=self.config.get("image_height", 128),
- vertical_fov=self.config.get("vertical_fov", 90),
- device_idx=device_idx,
- rendering_settings=self.rendering_settings,
- use_pb_gui=use_pb_gui,
- )
+ # TODO: We currently only support the optimized renderer due to some issues with obj highlighting
+ settings = MeshRendererSettings(
+ enable_shadow=enable_shadow, enable_pbr=enable_pbr, msaa=False, texture_scale=texture_scale, optimized=False
+ )
+
+ self.simulator = Simulator(
+ mode=mode,
+ physics_timestep=physics_timestep,
+ render_timestep=action_timestep,
+ image_width=self.config.get("image_width", 128),
+ image_height=self.config.get("image_height", 128),
+ vertical_fov=self.config.get("vertical_fov", 90),
+ device_idx=device_idx,
+ render_to_tensor=render_to_tensor,
+ rendering_settings=settings,
+ )
self.load()
def reload(self, config_file):
"""
- Reload another config file.
- This allows one to change the configuration on the fly.
+ Reload another config file
+        This allows one to change the configuration on the fly
:param config_file: new config file path
"""
@@ -111,8 +90,8 @@ def reload(self, config_file):
def reload_model(self, scene_id):
"""
- Reload another scene model.
- This allows one to change the scene on the fly.
+ Reload another scene model
+ This allows one to change the scene on the fly
:param scene_id: new scene_id
"""
@@ -122,7 +101,7 @@ def reload_model(self, scene_id):
def reload_model_object_randomization(self):
"""
- Reload the same model, with the next object randomization random seed.
+ Reload the same model, with the next object randomization random seed
"""
if self.object_randomization_freq is None:
return
@@ -130,14 +109,26 @@ def reload_model_object_randomization(self):
self.simulator.reload()
self.load()
+ def get_next_scene_random_seed(self):
+ """
+ Get the next scene random seed
+ """
+ if self.object_randomization_freq is None:
+ return None
+ return self.scene_random_seeds[self.scene_random_seed_idx]
+
def load(self):
"""
- Load the scene and robot specified in the config file.
+ Load the scene and robot
"""
if self.config["scene"] == "empty":
scene = EmptyScene()
+ self.simulator.import_scene(
+ scene, load_texture=self.config.get("load_texture", True), render_floor_plane=True
+ )
elif self.config["scene"] == "stadium":
scene = StadiumScene()
+ self.simulator.import_scene(scene, load_texture=self.config.get("load_texture", True))
elif self.config["scene"] == "gibson":
scene = StaticIndoorScene(
self.config["scene_id"],
@@ -148,76 +139,67 @@ def load(self):
trav_map_erosion=self.config.get("trav_map_erosion", 2),
pybullet_load_texture=self.config.get("pybullet_load_texture", False),
)
+ self.simulator.import_scene(scene, load_texture=self.config.get("load_texture", True))
elif self.config["scene"] == "igibson":
- urdf_file = self.config.get("urdf_file", None)
- if urdf_file is None and not self.config.get("online_sampling", True):
- urdf_file = "{}_task_{}_{}_{}_fixed_furniture".format(
- self.config["scene_id"],
- self.config["task"],
- self.config["task_id"],
- self.config["instance_id"],
- )
scene = InteractiveIndoorScene(
self.config["scene_id"],
- urdf_file=urdf_file,
waypoint_resolution=self.config.get("waypoint_resolution", 0.2),
num_waypoints=self.config.get("num_waypoints", 10),
build_graph=self.config.get("build_graph", False),
trav_map_resolution=self.config.get("trav_map_resolution", 0.1),
trav_map_erosion=self.config.get("trav_map_erosion", 2),
trav_map_type=self.config.get("trav_map_type", "with_obj"),
+ pybullet_load_texture=self.config.get("pybullet_load_texture", False),
texture_randomization=self.texture_randomization_freq is not None,
object_randomization=self.object_randomization_freq is not None,
object_randomization_idx=self.object_randomization_idx,
should_open_all_doors=self.config.get("should_open_all_doors", False),
load_object_categories=self.config.get("load_object_categories", None),
- not_load_object_categories=self.config.get("not_load_object_categories", None),
load_room_types=self.config.get("load_room_types", None),
load_room_instances=self.config.get("load_room_instances", None),
- merge_fixed_links=self.config.get("merge_fixed_links", True)
- and not self.config.get("online_sampling", False),
)
- # TODO: Unify the function import_scene and take out of the if-else clauses.
+ # TODO: Unify the function import_scene and take out of the if-else clauses
first_n = self.config.get("_set_first_n_objects", -1)
if first_n != -1:
scene._set_first_n_objects(first_n)
-
- self.simulator.import_scene(scene)
-
- # Get robot config
- robot_config = self.config["robot"]
-
- # Get corresponding robot class
- robot_name = robot_config.pop("name")
- assert robot_name in REGISTERED_ROBOTS, "Got invalid robot to instantiate: {}".format(robot_name)
-
- # TODO: Remove if statement once BEHAVIOR robot is refactored
- if robot_name == "BehaviorRobot":
- robot = REGISTERED_ROBOTS[robot_name](self.simulator)
- else:
- robot = REGISTERED_ROBOTS[robot_name](**robot_config)
-
- self.simulator.import_robot(robot)
-
- # TODO: Remove if statement once BEHAVIOR robot is refactored
- if robot_name == "BehaviorRobot":
- self.robot_body_id = robot.links["body"].get_body_id()
+ self.simulator.import_ig_scene(scene)
+
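+ # Instantiate the robot class named by the "robot" field of the config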
+ if self.config["robot"] == "Turtlebot":
+ robot = Turtlebot(self.config)
+ elif self.config["robot"] == "Husky":
+ robot = Husky(self.config)
+ elif self.config["robot"] == "Ant":
+ robot = Ant(self.config)
+ elif self.config["robot"] == "Humanoid":
+ robot = Humanoid(self.config)
+ elif self.config["robot"] == "JR2":
+ robot = JR2(self.config)
+ elif self.config["robot"] == "JR2_Kinova":
+ robot = JR2_Kinova(self.config)
+ elif self.config["robot"] == "Freight":
+ robot = Freight(self.config)
+ elif self.config["robot"] == "Fetch":
+ robot = Fetch(self.config)
+ elif self.config["robot"] == "Locobot":
+ robot = Locobot(self.config)
else:
- self.robot_body_id = robot.get_body_id()
+ raise Exception("unknown robot type: {}".format(self.config["robot"]))
- self.scene = self.simulator.scene
- self.robots = self.simulator.robots
+ self.scene = scene
+ self.robots = [robot]
+ for robot in self.robots:
+ self.simulator.import_robot(robot)
def clean(self):
"""
- Clean up the environment.
+ Clean up
"""
if self.simulator is not None:
self.simulator.disconnect()
def close(self):
"""
- Synonymous function with clean.
+ Synonymous function with clean
"""
self.clean()
@@ -231,12 +213,18 @@ def simulator_step(self):
def step(self, action):
"""
- Overwritten by subclasses.
+ Overwritten by subclasses
"""
return NotImplementedError()
def reset(self):
"""
- Overwritten by subclasses.
+ Overwritten by subclasses
"""
return NotImplementedError()
+
+ def set_mode(self, mode):
+ """
+ Set simulator mode
+ """
+ self.simulator.mode = mode
diff --git a/igibson/envs/igibson_env.py b/igibson/envs/igibson_env.py
index 7b4e087b0..38f8e80db 100644
--- a/igibson/envs/igibson_env.py
+++ b/igibson/envs/igibson_env.py
@@ -1,6 +1,5 @@
import argparse
import logging
-import os
import time
from collections import OrderedDict
@@ -11,13 +10,10 @@
from igibson.envs.env_base import BaseEnv
from igibson.external.pybullet_tools.utils import stable_z_on_aabb
-from igibson.robots.behavior_robot import BehaviorRobot
from igibson.robots.robot_base import BaseRobot
from igibson.sensors.bump_sensor import BumpSensor
from igibson.sensors.scan_sensor import ScanSensor
from igibson.sensors.vision_sensor import VisionSensor
-from igibson.tasks.behavior_task import BehaviorTask
-from igibson.tasks.dummy_task import DummyTask
from igibson.tasks.dynamic_nav_random_task import DynamicNavRandomTask
from igibson.tasks.interactive_nav_random_task import InteractiveNavRandomTask
from igibson.tasks.point_nav_fixed_task import PointNavFixedTask
@@ -30,7 +26,7 @@
class iGibsonEnv(BaseEnv):
"""
- iGibson Environment (OpenAI Gym interface).
+ iGibson Environment (OpenAI Gym interface)
"""
def __init__(
@@ -40,23 +36,19 @@ def __init__(
mode="headless",
action_timestep=1 / 10.0,
physics_timestep=1 / 240.0,
- rendering_settings=None,
- vr_settings=None,
device_idx=0,
+ render_to_tensor=False,
automatic_reset=False,
- use_pb_gui=False,
):
"""
:param config_file: config_file path
:param scene_id: override scene_id in config file
- :param mode: headless, headless_tensor, gui_interactive, gui_non_interactive, vr
+ :param mode: headless, gui, iggui
:param action_timestep: environment executes action per action_timestep second
:param physics_timestep: physics timestep for pybullet
- :param rendering_settings: rendering_settings to override the default one
- :param vr_settings: vr_settings to override the default one
:param device_idx: which GPU to run the simulation and rendering on
+ :param render_to_tensor: whether to render directly to pytorch tensors
:param automatic_reset: whether to automatic reset after an episode finishes
- :param use_pb_gui: concurrently display the interactive pybullet gui (for debugging)
"""
super(iGibsonEnv, self).__init__(
config_file=config_file,
@@ -64,16 +56,14 @@ def __init__(
mode=mode,
action_timestep=action_timestep,
physics_timestep=physics_timestep,
- rendering_settings=rendering_settings,
- vr_settings=vr_settings,
device_idx=device_idx,
- use_pb_gui=use_pb_gui,
+ render_to_tensor=render_to_tensor,
)
self.automatic_reset = automatic_reset
def load_task_setup(self):
"""
- Load task setup.
+ Load task setup
"""
self.initial_pos_z_offset = self.config.get("initial_pos_z_offset", 0.1)
# s = 0.5 * G * (t ** 2)
@@ -93,9 +83,7 @@ def load_task_setup(self):
self.object_randomization_freq = self.config.get("object_randomization_freq", None)
# task
- if "task" not in self.config:
- self.task = DummyTask(self)
- elif self.config["task"] == "point_nav_fixed":
+ if self.config["task"] == "point_nav_fixed":
self.task = PointNavFixedTask(self)
elif self.config["task"] == "point_nav_random":
self.task = PointNavRandomTask(self)
@@ -108,32 +96,17 @@ def load_task_setup(self):
elif self.config["task"] == "room_rearrangement":
self.task = RoomRearrangementTask(self)
else:
- try:
- import bddl
-
- with open(os.path.join(os.path.dirname(bddl.__file__), "activity_manifest.txt")) as f:
- all_activities = [line.strip() for line in f.readlines()]
-
- if self.config["task"] in all_activities:
- self.task = BehaviorTask(self)
- else:
- raise Exception("Invalid task: {}".format(self.config["task"]))
- except ImportError:
- raise Exception("bddl is not available.")
+ self.task = None
def build_obs_space(self, shape, low, high):
"""
- Helper function that builds individual observation spaces.
-
- :param shape: shape of the space
- :param low: lower bounds of the space
- :param high: higher bounds of the space
+ Helper function that builds individual observation spaces
"""
return gym.spaces.Box(low=low, high=high, shape=shape, dtype=np.float32)
def load_observation_space(self):
"""
- Load observation space.
+ Load observation space
"""
self.output = self.config["output"]
self.image_width = self.config.get("image_width", 128)
@@ -215,10 +188,6 @@ def load_observation_space(self):
if "bump" in self.output:
observation_space["bump"] = gym.spaces.Box(low=0.0, high=1.0, shape=(1,))
sensors["bump"] = BumpSensor(self)
- if "proprioception" in self.output:
- observation_space["proprioception"] = self.build_obs_space(
- shape=(self.robots[0].proprioception_dim,), low=-np.inf, high=np.inf
- )
if len(vision_modalities) > 0:
sensors["vision"] = VisionSensor(self, vision_modalities)
@@ -231,13 +200,13 @@ def load_observation_space(self):
def load_action_space(self):
"""
- Load action space.
+ Load action space
"""
self.action_space = self.robots[0].action_space
def load_miscellaneous_variables(self):
"""
- Load miscellaneous variables for book keeping.
+ Load miscellaneous variables for bookkeeping
"""
self.current_step = 0
self.collision_step = 0
@@ -246,7 +215,7 @@ def load_miscellaneous_variables(self):
def load(self):
"""
- Load environment.
+ Load environment
"""
super(iGibsonEnv, self).load()
self.load_task_setup()
@@ -254,10 +223,11 @@ def load(self):
self.load_action_space()
self.load_miscellaneous_variables()
- def get_state(self):
+ def get_state(self, collision_links=[]):
"""
- Get the current observation.
+ Get the current observation
+ :param collision_links: collisions from last physics timestep
:return: observation as a dictionary
"""
state = OrderedDict()
@@ -273,24 +243,22 @@ def get_state(self):
state[modality] = scan_obs[modality]
if "bump" in self.sensors:
state["bump"] = self.sensors["bump"].get_obs(self)
- if "proprioception" in self.sensors:
- state["proprioception"] = np.array(self.robots[0].get_proprioception())
return state
def run_simulation(self):
"""
- Run simulation for one action timestep (same as one render timestep in Simulator class).
+ Run simulation for one action timestep (same as one render timestep in Simulator class)
- :return: a list of collisions from the last physics timestep
+ :return: collision_links: collisions from last physics timestep
"""
self.simulator_step()
- collision_links = list(p.getContactPoints(bodyA=self.robot_body_id))
+ collision_links = list(p.getContactPoints(bodyA=self.robots[0].robot_ids[0]))
return self.filter_collision_links(collision_links)
def filter_collision_links(self, collision_links):
"""
- Filter out collisions that should be ignored.
+ Filter out collisions that should be ignored
:param collision_links: original collisions, a list of collisions
:return: filtered collisions
@@ -306,23 +274,22 @@ def filter_collision_links(self, collision_links):
continue
# ignore self collision with robot link a (body b is also robot itself)
- if item[2] == self.robot_body_id and item[4] in self.collision_ignore_link_a_ids:
+ if item[2] == self.robots[0].robot_ids[0] and item[4] in self.collision_ignore_link_a_ids:
continue
new_collision_links.append(item)
return new_collision_links
def populate_info(self, info):
"""
- Populate info dictionary with any useful information.
-
- :param info: the info dictionary to populate
+ Populate info dictionary with any useful information
"""
info["episode_length"] = self.current_step
info["collision_step"] = self.collision_step
def step(self, action):
"""
- Apply robot's action and return the next state, reward, done and info,
+ Apply robot's action.
+ Returns the next state, reward, done and info,
following OpenAI Gym's convention
:param action: robot actions
@@ -338,7 +305,7 @@ def step(self, action):
self.collision_links = collision_links
self.collision_step += int(len(collision_links) > 0)
- state = self.get_state()
+ state = self.get_state(collision_links)
info = {}
reward, info = self.task.get_reward(self, collision_links, action, info)
done, info = self.task.get_termination(self, collision_links, action, info)
@@ -353,10 +320,10 @@ def step(self, action):
def check_collision(self, body_id):
"""
- Check whether the given body_id has collision after one simulator step
+ Check whether the given body_id has any collision after one simulator step
:param body_id: pybullet body id
- :return: whether the given body_id has collision
+ :return: whether the given body_id has no collision
"""
self.simulator_step()
collisions = list(p.getContactPoints(bodyA=body_id))
@@ -365,11 +332,11 @@ def check_collision(self, body_id):
for item in collisions:
logging.debug("bodyA:{}, bodyB:{}, linkA:{}, linkB:{}".format(item[1], item[2], item[3], item[4]))
- return len(collisions) > 0
+ return len(collisions) == 0
def set_pos_orn_with_z_offset(self, obj, pos, orn=None, offset=None):
"""
- Reset position and orientation for the robot or the object.
+ Reset position and orientation for the robot or the object
:param obj: an instance of robot or object
:param pos: position
@@ -383,7 +350,7 @@ def set_pos_orn_with_z_offset(self, obj, pos, orn=None, offset=None):
offset = self.initial_pos_z_offset
is_robot = isinstance(obj, BaseRobot)
- body_id = obj.get_body_id()
+ body_id = obj.robot_ids[0] if is_robot else obj.body_id
# first set the correct orientation
obj.set_position_orientation(pos, quatToXYZW(euler2quat(*orn), "wxyz"))
# compute stable z based on this orientation
@@ -394,28 +361,28 @@ def set_pos_orn_with_z_offset(self, obj, pos, orn=None, offset=None):
def test_valid_position(self, obj, pos, orn=None):
"""
- Test if the robot or the object can be placed with no collision.
+ Test if the robot or the object can be placed with no collision
:param obj: an instance of robot or object
:param pos: position
:param orn: orientation
- :return: whether the position is valid
+ :return: whether the position is valid (no collision detected)
"""
is_robot = isinstance(obj, BaseRobot)
self.set_pos_orn_with_z_offset(obj, pos, orn)
if is_robot:
- obj.reset()
+ obj.robot_specific_reset()
obj.keep_still()
- body_id = obj.get_body_id()
+ body_id = obj.robot_ids[0] if is_robot else obj.body_id
has_collision = self.check_collision(body_id)
- return not has_collision
+ return has_collision
def land(self, obj, pos, orn):
"""
- Land the robot or the object onto the floor, given a valid position and orientation.
+ Land the robot or the object onto the floor, given a valid position and orientation
:param obj: an instance of robot or object
:param pos: position
@@ -426,10 +393,10 @@ def land(self, obj, pos, orn):
self.set_pos_orn_with_z_offset(obj, pos, orn)
if is_robot:
- obj.reset()
+ obj.robot_specific_reset()
obj.keep_still()
- body_id = obj.get_body_id()
+ body_id = obj.robot_ids[0] if is_robot else obj.body_id
land_success = False
# land for maximum 1 second, should fall down ~5 meters
@@ -441,15 +408,15 @@ def land(self, obj, pos, orn):
break
if not land_success:
- logging.warning("Object failed to land.")
+ print("WARNING: Failed to land")
if is_robot:
- obj.reset()
+ obj.robot_specific_reset()
obj.keep_still()
def reset_variables(self):
"""
- Reset bookkeeping variables for the next new episode.
+ Reset bookkeeping variables for the next new episode
"""
self.current_episode += 1
self.current_step = 0
@@ -458,9 +425,9 @@ def reset_variables(self):
def randomize_domain(self):
"""
- Randomize domain.
- Object randomization loads new object models with the same poses.
- Texture randomization loads new materials and textures for the same object models.
+ Domain randomization
+ Object randomization loads new object models with the same poses
+ Texture randomization loads new materials and textures for the same object models
"""
if self.object_randomization_freq is not None:
if self.current_episode % self.object_randomization_freq == 0:
@@ -471,13 +438,14 @@ def randomize_domain(self):
def reset(self):
"""
- Reset episode.
+ Reset episode
"""
self.randomize_domain()
- # Move robot away from the scene.
+ # move robot away from the scene
self.robots[0].set_position([100.0, 100.0, 100.0])
- self.task.reset(self)
- self.simulator.sync(force_sync=True)
+ self.task.reset_scene(self)
+ self.task.reset_agent(self)
+ self.simulator.sync()
state = self.get_state()
self.reset_variables()
@@ -490,7 +458,7 @@ def reset(self):
parser.add_argument(
"--mode",
"-m",
- choices=["headless", "headless_tensor", "gui_interactive", "gui_non_interactive"],
+ choices=["headless", "gui", "iggui"],
default="headless",
help="which mode for simulation (default: headless)",
)
diff --git a/igibson/envs/igibson_ray_env.py b/igibson/envs/igibson_ray_env.py
index e418af10c..136820281 100644
--- a/igibson/envs/igibson_ray_env.py
+++ b/igibson/envs/igibson_ray_env.py
@@ -28,7 +28,7 @@ def get_current_step(self):
parser.add_argument(
"--mode",
"-m",
- choices=["headless", "headless_tensor", "gui_interactive", "gui_non_interactive"],
+ choices=["headless", "gui", "iggui"],
default="headless",
help="which mode for simulation (default: headless)",
)
diff --git a/igibson/envs/igibson_rllib_env.py b/igibson/envs/igibson_rllib_env.py
index bdf3eec62..7b8acc25a 100644
--- a/igibson/envs/igibson_rllib_env.py
+++ b/igibson/envs/igibson_rllib_env.py
@@ -135,7 +135,7 @@ def __init__(self, env_config):
parser.add_argument(
"--config",
"-c",
- default=os.path.join(igibson.root_path, "examples", "configs", "turtlebot_nav.yaml"),
+ default=os.path.join(igibson.root_path, "examples", "configs", "turtlebot_point_nav.yaml"),
help="which config file to use [default: use yaml files in examples/configs]",
)
parser.add_argument("--ray_mode", default="train", help="Whether to run ray in train or test mode")
@@ -152,7 +152,7 @@ def __init__(self, env_config):
parser.add_argument(
"--mode",
"-m",
- choices=["headless", "headless_tensor", "gui_interactive", "gui_non_interactive"],
+ choices=["headless", "gui", "iggui"],
default="headless",
help="which mode for simulation (default: headless)",
)
diff --git a/igibson/envs/parallel_env.py b/igibson/envs/parallel_env.py
index ae25ecd91..c9a901b4d 100644
--- a/igibson/envs/parallel_env.py
+++ b/igibson/envs/parallel_env.py
@@ -243,7 +243,7 @@ def _worker(self, conn, env_constructor, flatten=False):
if __name__ == "__main__":
- config_filename = os.path.join(os.path.dirname(igibson.__file__), "..", "tests", "test.yaml")
+ config_filename = os.path.join(os.path.dirname(igibson.__file__), "test", "test.yaml")
def load_env():
return iGibsonEnv(config_file=config_filename, mode="headless")
diff --git a/igibson/examples/README.md b/igibson/examples/README.md
deleted file mode 100644
index 685523a5d..000000000
--- a/igibson/examples/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-### Code Examples
-
-The following examples illustrate the use of iGibson.
-
-If you are interested in just getting started as an end-user, you only need check out `./environments`.
-
-If you are looking for examples of BEHAVIOR, the benchmark of household activities that uses iGibson, please check the BEHAVIOR repository at https://github.com/StanfordVL/behavior.
-
-- environments: how to instantiate iGibson environments with interactive or static scenes, optionally with a scene selector.
-- learning: how to train RL policies for robot navigation using stable baselines 3, and how to save and replay demos of agents for imitation learning.
-- objects: how to create, load, and place objects to predefined locations or using a logic sampler (e.g. onTop(A, B)), how to change texture as a function of the temperature, and how to generate the minimum volume bounding boxes of objects.
-- object_states: how to change various objects states, including dusty, stained, (water sources) toggled on, (cleaning tool) soaked, sliced, and temprature, and how to save and reload object states.
-- observations: how to generate different observation modalities such as RGB, depth, LiDAR, segmentation, etc.
-- renderer: how to use the renderer directly, without the physics engine.
-- robots: how to (keyboard) control robots with differential drive controllers, IK controllers and sampling-based motion planners.
-- ros: how to run ROS with iGibson as if it is the real world.
-- scenes: how to load interactive and non-interactive scenes, how to use domain randomization (of object models and/or texture), and how to create a tour video of the scenes.
-- vr: how to use iGibson with VR.
-- web_ui: how to start a web server that hosts iGibson environments.
diff --git a/igibson/examples/learning/__init__.py b/igibson/examples/behavior/__init__.py
similarity index 100%
rename from igibson/examples/learning/__init__.py
rename to igibson/examples/behavior/__init__.py
diff --git a/igibson/examples/behavior/behavior_debug_replay.py b/igibson/examples/behavior/behavior_debug_replay.py
new file mode 100644
index 000000000..b18148eda
--- /dev/null
+++ b/igibson/examples/behavior/behavior_debug_replay.py
@@ -0,0 +1,199 @@
+"""
+Debugging script to bypass tasknet
+"""
+
+import argparse
+import datetime
+import os
+
+import igibson
+from igibson.metrics.agent import AgentMetric
+from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
+from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
+from igibson.robots.behavior_robot import BehaviorRobot
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+from igibson.utils.ig_logging import IGLogReader, IGLogWriter
+
+POST_TASK_STEPS = 200
+PHYSICS_WARMING_STEPS = 200
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Run and collect an ATUS demo")
+ parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log to replay")
+ parser.add_argument(
+ "--vr_replay_log_path", type=str, help="Path (and filename) of file to save replay to (for debugging)"
+ )
+ parser.add_argument(
+ "--frame_save_path",
+ type=str,
+ help="Path to save frames (frame number added automatically, as well as .jpg extension)",
+ )
+ parser.add_argument(
+ "--disable_save",
+ action="store_true",
+ help="Whether to disable saving log of replayed trajectory, used for validation.",
+ )
+ parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.")
+ parser.add_argument(
+ "--mode",
+ type=str,
+ choices=["headless", "vr", "simple"],
+ help="Whether to disable replay through VR and use iggui instead.",
+ )
+ return parser.parse_args()
+
+
+def replay_demo(
+ in_log_path,
+ out_log_path=None,
+ disable_save=False,
+ frame_save_path=None,
+ verbose=True,
+ mode="headless",
+ start_callbacks=[],
+ step_callbacks=[],
+ end_callbacks=[],
+ profile=False,
+):
+ """
+ Replay a BEHAVIOR demo.
+
+ Note that this returns, but does not check for determinism. Use safe_replay_demo to assert for determinism
+ when using in scenarios where determinism is important.
+
+ @param in_log_path: the path of the BEHAVIOR demo log to replay.
+ @param out_log_path: the path of the new BEHAVIOR demo log to save from the replay.
+ @param frame_save_path: the path to save frame images to. None to disable frame image saving.
+ @param mode: which rendering mode ("headless", "simple", "vr"). In simple mode, the demo will be replayed with simple robot view.
+ @param disable_save: Whether saving the replay as a BEHAVIOR demo log should be disabled.
+ @param profile: Whether the replay should be profiled, with profiler output to stdout.
+ @param start_callback: A callback function that will be called immediately before starting to replay steps. Should
+ take two arguments: a BehaviorRobot and an IGLogReader.
+ @param step_callback: A callback function that will be called immediately following each replayed step. Should
+ take two arguments: a BehaviorRobot and an IGLogReader.
+ @param end_callback: A callback function that will be called when replay has finished. Should take two arguments:
+ a BehaviorRobot and an IGLogReader.
+ @return a dictionary of demo replay statistics.
+ """
+ # HDR files for PBR rendering
+ hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
+ hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
+ light_modulation_map_filename = os.path.join(
+ igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
+ )
+ background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
+
+ # VR rendering settings
+ vr_rendering_settings = MeshRendererSettings(
+ optimized=True,
+ fullscreen=False,
+ env_texture_filename=hdr_texture,
+ env_texture_filename2=hdr_texture2,
+ env_texture_filename3=background_texture,
+ light_modulation_map_filename=light_modulation_map_filename,
+ enable_shadow=True,
+ enable_pbr=True,
+ msaa=False,
+ light_dimming_factor=1.0,
+ )
+
+ # Check mode
+ assert mode in ["headless", "vr", "simple"]
+
+ # Initialize settings to save action replay frames
+ vr_settings = VrSettings(config_str=IGLogReader.read_metadata_attr(in_log_path, "/metadata/vr_settings"))
+ vr_settings.set_frame_save_path(frame_save_path)
+
+ physics_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/physics_timestep")
+ render_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/render_timestep")
+ filter_objects = IGLogReader.read_metadata_attr(in_log_path, "/metadata/filter_objects")
+
+ scene_id = "Rs_int"
+ # VR system settings
+ s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True))
+
+ scene = InteractiveIndoorScene(
+ scene_id, load_object_categories=["walls", "floors", "ceilings"], load_room_types=["kitchen"]
+ )
+
+ vr_agent = BehaviorRobot(s)
+ s.import_ig_scene(scene)
+ s.import_behavior_robot(vr_agent)
+ s.register_main_vr_robot(vr_agent)
+ vr_agent.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1])
+
+ log_reader = IGLogReader(in_log_path, log_status=False)
+
+ log_writer = None
+ if not disable_save:
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+ if out_log_path is None:
+ out_log_path = "{}_{}_replay.hdf5".format(scene_id, timestamp)
+
+ log_writer = IGLogWriter(
+ s,
+ log_filepath=out_log_path,
+ store_vr=False,
+ vr_robot=vr_agent,
+ profiling_mode=profile,
+ filter_objects=filter_objects,
+ )
+ log_writer.set_up_data_storage()
+
+ for callback in start_callbacks:
+ callback(vr_agent, log_reader)
+
+ task_done = False
+ satisfied_predicates = {}
+ while log_reader.get_data_left_to_read():
+
+ s.step(print_stats=profile)
+
+ # Set camera each frame
+ if mode == "vr":
+ log_reader.set_replay_camera(s)
+
+ for callback in step_callbacks:
+ callback(vr_agent, log_reader)
+
+ # Get relevant VR action data and update VR agent
+ vr_agent.apply_action(log_reader.get_agent_action("vr_robot"))
+
+ if not disable_save:
+ log_writer.process_frame()
+
+ demo_statistics = {}
+ for callback in end_callbacks:
+ callback(vr_agent, log_reader)
+
+ s.disconnect()
+
+ demo_statistics = {
+ "task_done": task_done,
+ "satisfied_predicates": satisfied_predicates,
+ "total_frame_num": log_reader.total_frame_num,
+ }
+ return demo_statistics
+
+
+def main():
+ args = parse_args()
+ agent_metrics = AgentMetric()
+ replay_demo(
+ args.vr_log_path,
+ out_log_path=args.vr_replay_log_path,
+ step_callbacks=[agent_metrics.step_callback],
+ disable_save=args.disable_save,
+ frame_save_path=args.frame_save_path,
+ mode=args.mode,
+ profile=args.profile,
+ )
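+ # Drop into an interactive debugger so the collected agent metrics can be inspected after replay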
+ import pdb
+
+ pdb.set_trace()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/behavior/behavior_debug_vr.py b/igibson/examples/behavior/behavior_debug_vr.py
new file mode 100644
index 000000000..0e60af2d8
--- /dev/null
+++ b/igibson/examples/behavior/behavior_debug_vr.py
@@ -0,0 +1,104 @@
+"""
+Debugging script to bypass tasknet
+"""
+
+import copy
+import datetime
+import os
+
+import numpy as np
+
+import igibson
+from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
+from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
+from igibson.robots.behavior_robot import BehaviorRobot
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+from igibson.utils.ig_logging import IGLogWriter
+
+POST_TASK_STEPS = 200
+PHYSICS_WARMING_STEPS = 200
+
+
+def main():
+ collect_demo(scene_id="Rs_int")
+
+
+def collect_demo(scene_id, vr_log_path=None, disable_save=False, no_vr=False, profile=False):
+ # HDR files for PBR rendering
+ hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
+ hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
+ light_modulation_map_filename = os.path.join(
+ igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
+ )
+ background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
+
+ # VR rendering settings
+ vr_rendering_settings = MeshRendererSettings(
+ optimized=True,
+ fullscreen=False,
+ env_texture_filename=hdr_texture,
+ env_texture_filename2=hdr_texture2,
+ env_texture_filename3=background_texture,
+ light_modulation_map_filename=light_modulation_map_filename,
+ enable_shadow=True,
+ enable_pbr=True,
+ msaa=False,
+ light_dimming_factor=1.0,
+ )
+
+ s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True))
+
+ scene = InteractiveIndoorScene(
+ scene_id, load_object_categories=["walls", "floors", "ceilings"], load_room_types=["kitchen"]
+ )
+
+ vr_agent = BehaviorRobot(s)
+ s.import_ig_scene(scene)
+ s.import_behavior_robot(vr_agent)
+ s.register_main_vr_robot(vr_agent)
+ vr_agent.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1])
+
+ # VR system settings
+
+ log_writer = None
+ if not disable_save:
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+ if vr_log_path is None:
+ vr_log_path = "{}_{}_{}.hdf5".format("behavior_dummy_demo", scene_id, timestamp)
+ log_writer = IGLogWriter(
+ s,
+ log_filepath=vr_log_path,
+ task=None,
+ store_vr=False if no_vr else True,
+ vr_robot=vr_agent,
+ profiling_mode=profile,
+ filter_objects=False,
+ )
+ log_writer.set_up_data_storage()
+
+ physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS)
+
+ total_steps = 2000
+ for step in range(total_steps):
+ s.step()
+ action = s.gen_vr_robot_action()
+ # Apply zero actions until the physics warm-up period has elapsed
+ if step < physics_warming_steps:
+ action = np.zeros_like(action)
+
+ vr_agent.apply_action(action)
+
+ if log_writer and not disable_save:
+ log_writer.process_frame()
+
+ if log_writer and not disable_save:
+ log_writer.end_log_session()
+
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/behavior/behavior_demo_batch.py b/igibson/examples/behavior/behavior_demo_batch.py
new file mode 100644
index 000000000..3ff53d0db
--- /dev/null
+++ b/igibson/examples/behavior/behavior_demo_batch.py
@@ -0,0 +1,83 @@
+"""
+BEHAVIOR demo batch analysis script
+"""
+
+import json
+import logging
+import os
+from pathlib import Path
+
+import bddl
+import pandas as pd
+
+from igibson.examples.behavior.behavior_demo_replay import replay_demo
+
+
+def behavior_demo_batch(
+ demo_root, log_manifest, out_dir, get_callbacks_callback, skip_existing=True, save_frames=False
+):
+ """
+ Execute replay analysis functions (provided through callbacks) on a batch of BEHAVIOR demos.
+
+ @param demo_root: Directory containing the demo files listed in the manifests.
+ @param log_manifest: The manifest file containing list of BEHAVIOR demos to batch over.
+ @param out_dir: Directory to store results in.
+ @param get_callbacks_callback: A function that will be called for each demo that needs to return
+ a four-tuple: (start_callbacks, step_callbacks, end_callbacks, data_callbacks). Each of the
+ the first three callback function sets need to be compatible with the behavior_demo_replay
+ API and will be used for this purpose for that particular demo. The data callbacks should
+ take no arguments and return a dictionary to be included in the demo's replay data that will
+ be saved in the end.
+ @param skip_existing: Whether demos with existing output logs should be skipped.
+ @param save_frames: Whether the demo's frames should be saved alongside statistics.
+ """
+ logger = logging.getLogger()
+ logger.disabled = True
+
+ bddl.set_backend("iGibson")
+
+ demo_list = pd.read_csv(log_manifest)
+
+ for idx, demo in enumerate(demo_list["demos"]):
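+ # Skip entries that are themselves replay logs rather than original demos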
+ if "replay" in demo:
+ continue
+
+ demo_name = os.path.splitext(demo)[0]
+ demo_path = os.path.join(demo_root, demo)
+ replay_path = os.path.join(out_dir, demo_name + "_replay.hdf5")
+ log_path = os.path.join(out_dir, demo_name + ".json")
+
+ if skip_existing and os.path.exists(log_path):
+ print("Skipping existing demo: {}, {} out of {}".format(demo, idx, len(demo_list["demos"])))
+ continue
+
+ print("Replaying demo: {}, {} out of {}".format(demo, idx, len(demo_list["demos"])))
+
+ curr_frame_save_path = None
+ if save_frames:
+ curr_frame_save_path = os.path.join(out_dir, demo_name + ".mp4")
+
+ try:
+ start_callbacks, step_callbacks, end_callbacks, data_callbacks = get_callbacks_callback()
+ demo_information = replay_demo(
+ in_log_path=demo_path,
+ out_log_path=replay_path,
+ frame_save_path=curr_frame_save_path,
+ start_callbacks=start_callbacks,
+ step_callbacks=step_callbacks,
+ end_callbacks=end_callbacks,
+ mode="headless",
+ verbose=False,
+ )
+ demo_information["failed"] = False
+ demo_information["filename"] = Path(demo).name
+
+ for callback in data_callbacks:
+ demo_information.update(callback())
+
+ except Exception as e:
+ print("Demo failed with error: ", e)
+ demo_information = {"demo_id": Path(demo).name, "failed": True, "failure_reason": str(e)}
+
+ with open(log_path, "w") as file:
+ json.dump(demo_information, file)
diff --git a/igibson/examples/behavior/behavior_demo_checkpoints.py b/igibson/examples/behavior/behavior_demo_checkpoints.py
new file mode 100644
index 000000000..727f59090
--- /dev/null
+++ b/igibson/examples/behavior/behavior_demo_checkpoints.py
@@ -0,0 +1,34 @@
+"""Save checkpoints from a BEHAVIOR demo."""
+import os
+
+import bddl
+
+import igibson
+from igibson.examples.behavior.behavior_demo_replay import safe_replay_demo
+from igibson.utils.checkpoint_utils import save_checkpoint
+
+bddl.set_backend("iGibson")
+
+
+def create_checkpoints(demo_file, checkpoint_directory, checkpoint_every_n_steps):
+ # Create a step callback function to feed replay steps into checkpoints.
+ def step_callback(igbhvr_act_inst):
+ if (
+ not igbhvr_act_inst.current_success
+ and igbhvr_act_inst.simulator.frame_count % checkpoint_every_n_steps == 0
+ ):
+ save_checkpoint(igbhvr_act_inst.simulator, checkpoint_directory)
+
+ safe_replay_demo(demo_file, mode="headless", step_callback=step_callback)
+
+
+def main():
+ demo_file = os.path.join(igibson.ig_dataset_path, "tests", "storing_food_0_Rs_int_2021-05-31_11-49-30.hdf5")
+ checkpoint_directory = "checkpoints"
+ if not os.path.exists(checkpoint_directory):
+ os.mkdir(checkpoint_directory)
+ create_checkpoints(demo_file, checkpoint_directory, 50)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/behavior/behavior_demo_collection.py b/igibson/examples/behavior/behavior_demo_collection.py
new file mode 100644
index 000000000..613f277ee
--- /dev/null
+++ b/igibson/examples/behavior/behavior_demo_collection.py
@@ -0,0 +1,218 @@
+"""
+Main BEHAVIOR demo collection entrypoint
+"""
+
+import argparse
+import copy
+import datetime
+import os
+
+import bddl
+import numpy as np
+
+import igibson
+from igibson.activity.activity_base import iGBEHAVIORActivityInstance
+from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
+from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings
+from igibson.simulator import Simulator
+from igibson.utils.ig_logging import IGLogWriter
+
+POST_TASK_STEPS = 200
+PHYSICS_WARMING_STEPS = 200
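+# POST_TASK_STEPS: number of extra frames recorded after the task is first reported done.
+# PHYSICS_WARMING_STEPS: number of initial frames during which zero actions are applied so physics can settle.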
+
+
+def parse_args():
+ scene_choices = [
+ "Beechwood_0_int",
+ "Beechwood_1_int",
+ "Benevolence_0_int",
+ "Benevolence_1_int",
+ "Benevolence_2_int",
+ "Ihlen_0_int",
+ "Ihlen_1_int",
+ "Merom_0_int",
+ "Merom_1_int",
+ "Pomaria_0_int",
+ "Pomaria_1_int",
+ "Pomaria_2_int",
+ "Rs_int",
+ "Wainscott_0_int",
+ "Wainscott_1_int",
+ ]
+
+ task_id_choices = [0, 1]
+ parser = argparse.ArgumentParser(description="Run and collect an ATUS demo")
+ parser.add_argument(
+ "--task", type=str, required=True, nargs="?", help="Name of ATUS activity matching parent folder in bddl."
+ )
+ parser.add_argument(
+ "--task_id",
+ type=int,
+ required=True,
+ choices=task_id_choices,
+ nargs="?",
+ help="BDDL integer ID, matching suffix of bddl.",
+ )
+ parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log")
+ parser.add_argument(
+ "--scene", type=str, choices=scene_choices, nargs="?", help="Scene name/ID matching iGibson interactive scenes."
+ )
+ parser.add_argument("--disable_save", action="store_true", help="Whether to disable saving logfiles.")
+ parser.add_argument(
+ "--disable_scene_cache", action="store_true", help="Whether to disable using pre-initialized scene caches."
+ )
+ parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.")
+ parser.add_argument(
+ "--no_vr", action="store_true", help="Whether to turn off VR recording and save random actions."
+ )
+ parser.add_argument("--max_steps", type=int, default=-1, help="Maximum number of steps to record before stopping.")
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+ bddl.set_backend("iGibson")
+ collect_demo(
+ args.task,
+ args.task_id,
+ args.scene,
+ args.vr_log_path,
+ args.disable_save,
+ args.max_steps,
+ args.no_vr,
+ args.disable_scene_cache,
+ args.profile,
+ )
+
+
+def collect_demo(
+ task,
+ task_id,
+ scene,
+ vr_log_path=None,
+ disable_save=False,
+ max_steps=-1,
+ no_vr=False,
+ disable_scene_cache=False,
+ profile=False,
+):
+ # HDR files for PBR rendering
+ hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
+ hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
+ light_modulation_map_filename = os.path.join(
+ igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
+ )
+ background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
+
+ # VR rendering settings
+ vr_rendering_settings = MeshRendererSettings(
+ optimized=True,
+ fullscreen=False,
+ env_texture_filename=hdr_texture,
+ env_texture_filename2=hdr_texture2,
+ env_texture_filename3=background_texture,
+ light_modulation_map_filename=light_modulation_map_filename,
+ enable_shadow=True,
+ enable_pbr=True,
+ msaa=False,
+ light_dimming_factor=1.0,
+ )
+
+ # VR system settings
+ mode = "headless" if no_vr else "vr"
+ s = Simulator(
+ mode=mode,
+ rendering_settings=vr_rendering_settings,
+ vr_settings=VrSettings(use_vr=True),
+ physics_timestep=1 / 300.0,
+ render_timestep=1 / 30.0,
+ )
+ igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id)
+
+ scene_kwargs = None
+ online_sampling = True
+
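+ # Prefer a pre-sampled (cached) scene URDF when available; otherwise sample object placements online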
+ if not disable_scene_cache:
+ scene_kwargs = {
+ "urdf_file": "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id),
+ }
+ online_sampling = False
+
+ igbhvr_act_inst.initialize_simulator(
+ simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling
+ )
+ vr_agent = igbhvr_act_inst.simulator.robots[0]
+
+ if not no_vr:
+ vr_cs = VrConditionSwitcher(
+ igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction
+ )
+
+ log_writer = None
+ if not disable_save:
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+ if vr_log_path is None:
+ vr_log_path = "{}_{}_{}_{}.hdf5".format(task, task_id, scene, timestamp)
+ log_writer = IGLogWriter(
+ s,
+ log_filepath=vr_log_path,
+ task=igbhvr_act_inst,
+ store_vr=False if no_vr else True,
+ vr_robot=vr_agent,
+ profiling_mode=profile,
+ filter_objects=True,
+ )
+ log_writer.set_up_data_storage()
+
+ satisfied_predicates_cached = {}
+ post_task_steps = copy.deepcopy(POST_TASK_STEPS)
+ physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS)
+
+ steps = 0
+ while max_steps < 0 or steps < max_steps:
+ igbhvr_act_inst.simulator.step(print_stats=profile)
+ task_done, satisfied_predicates = igbhvr_act_inst.check_success()
+
+ if no_vr:
+ if steps < 2:
+ action = np.zeros((28,))
+ action[19] = 1
+ action[27] = 1
+ else:
+ action = np.random.uniform(-0.01, 0.01, size=(28,))
+ else:
+ action = igbhvr_act_inst.simulator.gen_vr_robot_action()
+ if steps < physics_warming_steps:
+ action = np.zeros_like(action)
+
+ vr_agent.apply_action(action)
+
+ if not no_vr:
+ if satisfied_predicates != satisfied_predicates_cached:
+ vr_cs.refresh_condition(switch=False)
+ satisfied_predicates_cached = satisfied_predicates
+
+ if igbhvr_act_inst.simulator.query_vr_event("right_controller", "overlay_toggle"):
+ vr_cs.refresh_condition()
+
+ if igbhvr_act_inst.simulator.query_vr_event("left_controller", "overlay_toggle"):
+ vr_cs.toggle_show_state()
+
+ if log_writer and not disable_save:
+ log_writer.process_frame()
+
+ if task_done:
+ post_task_steps -= 1
+ if post_task_steps == 0:
+ break
+
+ steps += 1
+
+ if log_writer and not disable_save:
+ log_writer.end_log_session()
+
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/behavior/behavior_demo_metrics_batch.py b/igibson/examples/behavior/behavior_demo_metrics_batch.py
new file mode 100644
index 000000000..2837dcb83
--- /dev/null
+++ b/igibson/examples/behavior/behavior_demo_metrics_batch.py
@@ -0,0 +1,35 @@
+import argparse
+
+from igibson.examples.behavior.behavior_demo_batch import behavior_demo_batch
+from igibson.metrics.agent import AgentMetric
+from igibson.metrics.disarrangement import KinematicDisarrangement, LogicalDisarrangement
+from igibson.metrics.gaze import GazeMetric
+from igibson.metrics.task import TaskMetric
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Collect metrics from BEHAVIOR demos in manifest.")
+ parser.add_argument("demo_root", type=str, help="Directory containing demos listed in the manifest.")
+ parser.add_argument("log_manifest", type=str, help="Plain text file consisting of list of demos to replay.")
+ parser.add_argument("out_dir", type=str, help="Directory to store results in.")
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+
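+ # Build the (start, step, end, data) callback four-tuple expected by behavior_demo_batch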
+ def get_metrics_callbacks():
+ metrics = [KinematicDisarrangement(), LogicalDisarrangement(), AgentMetric(), GazeMetric(), TaskMetric()]
+
+ return (
+ [metric.start_callback for metric in metrics],
+ [metric.step_callback for metric in metrics],
+ [metric.end_callback for metric in metrics],
+ [metric.gather_results for metric in metrics],
+ )
+
+ behavior_demo_batch(args.demo_root, args.log_manifest, args.out_dir, get_metrics_callbacks)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/learning/demo_replaying_example.py b/igibson/examples/behavior/behavior_demo_replay.py
similarity index 62%
rename from igibson/examples/learning/demo_replaying_example.py
rename to igibson/examples/behavior/behavior_demo_replay.py
index ef86267d4..5f0e374da 100644
--- a/igibson/examples/learning/demo_replaying_example.py
+++ b/igibson/examples/behavior/behavior_demo_replay.py
@@ -1,36 +1,24 @@
+"""
+Main BEHAVIOR demo replay entrypoint
+"""
+
import argparse
import datetime
-import logging
import os
import pprint
+import bddl
import h5py
import numpy as np
import igibson
-from igibson.envs.igibson_env import iGibsonEnv
+from igibson.activity.activity_base import iGBEHAVIORActivityInstance
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
+from igibson.simulator import Simulator
from igibson.utils.git_utils import project_git_info
from igibson.utils.ig_logging import IGLogReader, IGLogWriter
-from igibson.utils.utils import parse_config, parse_str_config
-
-
-def main():
- """
- Example of how to replay a previously recorded demo of a task
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- args = parse_args()
- replay_demo(
- args.log_path,
- out_log_path=args.replay_log_path,
- disable_save=args.disable_save,
- frame_save_path=args.frame_save_path,
- mode=args.mode,
- profile=args.profile,
- config_file=args.config,
- )
+from igibson.utils.utils import parse_str_config
def verify_determinism(in_log_path, out_log_path):
@@ -40,22 +28,18 @@ def verify_determinism(in_log_path, out_log_path):
for attribute in original_file["physics_data"][obj]:
is_close = np.isclose(
original_file["physics_data"][obj][attribute], new_file["physics_data"][obj][attribute]
- )
- is_deterministic = is_deterministic and is_close.all()
- if not is_close.all():
- logging.info(
- "Mismatch for obj {} with mismatched attribute {} starting at timestep {}".format(
- obj, attribute, np.where(is_close == False)[0][0]
- )
- )
+ ).all()
+ is_deterministic = is_deterministic and is_close
+ if not is_close:
+ print("Mismatch for obj {} with mismatched attribute {}".format(obj, attribute))
return bool(is_deterministic)
def parse_args():
parser = argparse.ArgumentParser(description="Run and collect an ATUS demo")
- parser.add_argument("--log_path", type=str, required=True, help="Path (and filename) of log to replay")
+ parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log to replay")
parser.add_argument(
- "--replay_log_path", type=str, help="Path (and filename) of file to save replay to (for debugging)"
+ "--vr_replay_log_path", type=str, help="Path (and filename) of file to save replay to (for debugging)"
)
parser.add_argument(
"--frame_save_path",
@@ -71,14 +55,8 @@ def parse_args():
parser.add_argument(
"--mode",
type=str,
- default="gui_non_interactive",
- choices=["headless", "headless_tensor", "vr", "gui_non_interactive"],
- help="Mode to run simulator in",
- )
- parser.add_argument(
- "--config",
- help="which config file to use [default: use yaml files in examples/configs]",
- default=os.path.join(igibson.example_config_path, "behavior_vr.yaml"),
+ choices=["headless", "vr", "simple"],
+ help="Whether to disable replay through VR and use iggui instead.",
)
return parser.parse_args()
@@ -90,37 +68,29 @@ def replay_demo(
frame_save_path=None,
verbose=True,
mode="headless",
- config_file=os.path.join(igibson.example_config_path, "behavior_vr.yaml"),
start_callbacks=[],
step_callbacks=[],
end_callbacks=[],
profile=False,
- image_size=(1280, 720),
- use_pb_gui=False,
):
"""
- Replay a demo of a task.
+ Replay a BEHAVIOR demo.
Note that this returns, but does not check for determinism. Use safe_replay_demo to assert for determinism
when using in scenarios where determinism is important.
@param in_log_path: the path of the BEHAVIOR demo log to replay.
@param out_log_path: the path of the new BEHAVIOR demo log to save from the replay.
- @param disable_save: Whether saving the replay as a BEHAVIOR demo log should be disabled.
@param frame_save_path: the path to save frame images to. None to disable frame image saving.
- @param verbose: Whether to print out git diff in detail
- @param mode: which rendering mode ("headless", "headless_tensor", "gui_non_interactive", "vr"). In gui_non_interactive
- mode, the demo will be replayed with simple robot view.
- @param config_file: environment config file
+ @param mode: which rendering mode ("headless", "simple", "vr"). In simple mode, the demo will be replayed with simple robot view.
+ @param disable_save: Whether saving the replay as a BEHAVIOR demo log should be disabled.
+ @param profile: Whether the replay should be profiled, with profiler output to stdout.
@param start_callback: A callback function that will be called immediately before starting to replay steps. Should
- take two arguments: iGibsonEnv and IGLogReader
+ take two arguments: an iGBEHAVIORActivityInstance and an IGLogReader.
@param step_callback: A callback function that will be called immediately following each replayed step. Should
- take two arguments: iGibsonEnv and IGLogReader
- @param end_callback: A callback function that will be called when replay has finished. Should
- take two arguments: iGibsonEnv and IGLogReader
- @param profile: Whether the replay should be profiled, with profiler output to stdout.
- @param image_size: The image size that should be used by the renderer.
- @param use_pb_gui: display the interactive pybullet gui (for debugging)
+ take two arguments: an iGBEHAVIORActivityInstance and an IGLogReader.
+ @param end_callback: A callback function that will be called when replay has finished. Should take two
+ arguments: an iGBEHAVIORActivityInstance and an IGLogReader.
@return if disable_save is True, returns None. Otherwise, returns a boolean indicating if replay was deterministic.
"""
# HDR files for PBR rendering
@@ -132,7 +102,7 @@ def replay_demo(
background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
# VR rendering settings
- rendering_setting = MeshRendererSettings(
+ vr_rendering_settings = MeshRendererSettings(
optimized=True,
fullscreen=False,
env_texture_filename=hdr_texture,
@@ -146,7 +116,7 @@ def replay_demo(
)
# Check mode
- assert mode in ["headless", "headless_tensor", "vr", "gui_non_interactive"]
+ assert mode in ["headless", "vr", "simple"]
# Initialize settings to save action replay frames
vr_settings = VrSettings(config_str=IGLogReader.read_metadata_attr(in_log_path, "/metadata/vr_settings"))
@@ -158,14 +128,6 @@ def replay_demo(
physics_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/physics_timestep")
render_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/render_timestep")
filter_objects = IGLogReader.read_metadata_attr(in_log_path, "/metadata/filter_objects")
- instance_id = IGLogReader.read_metadata_attr(in_log_path, "/metadata/instance_id")
- urdf_file = IGLogReader.read_metadata_attr(in_log_path, "/metadata/urdf_file")
-
- if urdf_file is None:
- urdf_file = "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id)
-
- if instance_id is None:
- instance_id = 0
logged_git_info = IGLogReader.read_metadata_attr(in_log_path, "/metadata/git_info")
logged_git_info = parse_str_config(logged_git_info)
@@ -189,80 +151,82 @@ def replay_demo(
print("Current git info:\n")
pp.pprint(git_info[key])
- config = parse_config(config_file)
- config["task"] = task
- config["task_id"] = task_id
- config["scene_id"] = scene
- config["instance_id"] = instance_id
- config["urdf_file"] = urdf_file
- config["image_width"] = image_size[0]
- config["image_height"] = image_size[1]
- config["online_sampling"] = False
-
- env = iGibsonEnv(
- config_file=config,
+ # VR system settings
+ s = Simulator(
mode=mode,
- action_timestep=render_timestep,
physics_timestep=physics_timestep,
- rendering_settings=rendering_setting,
+ render_timestep=render_timestep,
+ rendering_settings=vr_rendering_settings,
vr_settings=vr_settings,
- use_pb_gui=use_pb_gui,
+ image_width=1280,
+ image_height=720,
)
- env.reset()
- robot = env.robots[0]
+ igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id)
+ igbhvr_act_inst.initialize_simulator(
+ simulator=s,
+ scene_id=scene,
+ scene_kwargs={
+ "urdf_file": "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id),
+ },
+ load_clutter=True,
+ online_sampling=False,
+ )
+ vr_agent = igbhvr_act_inst.simulator.robots[0]
log_reader = IGLogReader(in_log_path, log_status=False)
+
log_writer = None
if not disable_save:
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
- if out_log_path is None:
- out_log_path = "{}_{}_{}_{}_{}_replay.hdf5".format(task, task_id, scene, instance_id, timestamp)
+ if out_log_path == None:
+ out_log_path = "{}_{}_{}_{}_replay.hdf5".format(task, task_id, scene, timestamp)
log_writer = IGLogWriter(
- env.simulator,
+ s,
log_filepath=out_log_path,
- task=env.task,
+ task=igbhvr_act_inst,
store_vr=False,
- vr_robot=robot,
+ vr_robot=vr_agent,
profiling_mode=profile,
filter_objects=filter_objects,
)
log_writer.set_up_data_storage()
- try:
- for callback in start_callbacks:
- callback(env, log_reader)
-
- task_done = False
+ for callback in start_callbacks:
+ callback(igbhvr_act_inst, log_reader)
- while log_reader.get_data_left_to_read():
- env.step(log_reader.get_agent_action("vr_robot"))
- task_done |= env.task.check_success()[0]
+ task_done = False
+ while log_reader.get_data_left_to_read():
- # Set camera each frame
- if mode == "vr":
- log_reader.set_replay_camera(env.simulator)
+ igbhvr_act_inst.simulator.step(print_stats=profile)
+ task_done, _ = igbhvr_act_inst.check_success()
- for callback in step_callbacks:
- callback(env, log_reader)
+ # Set camera each frame
+ if mode == "vr":
+ log_reader.set_replay_camera(s)
- if not disable_save:
- log_writer.process_frame()
+ for callback in step_callbacks:
+ callback(igbhvr_act_inst, log_reader)
- logging.info("Demo was successfully completed: {}".format(task_done))
+ # Get relevant VR action data and update VR agent
+ vr_agent.apply_action(log_reader.get_agent_action("vr_robot"))
- demo_statistics = {}
- for callback in end_callbacks:
- callback(env, log_reader)
- finally:
- env.close()
if not disable_save:
- log_writer.end_log_session()
+ log_writer.process_frame()
+
+ print("Demo was successfully completed: ", task_done)
+
+ demo_statistics = {}
+ for callback in end_callbacks:
+ callback(igbhvr_act_inst, log_reader)
+
+ s.disconnect()
is_deterministic = None
if not disable_save:
+ log_writer.end_log_session()
is_deterministic = verify_determinism(in_log_path, out_log_path)
- logging.info("Demo was deterministic: {}".format(is_deterministic))
+ print("Demo was deterministic: ", is_deterministic)
demo_statistics = {
"deterministic": is_deterministic,
@@ -283,5 +247,18 @@ def safe_replay_demo(*args, **kwargs):
), "Replay was not deterministic (or was executed with disable_save=True)."
+def main():
+ args = parse_args()
+ bddl.set_backend("iGibson")
+ replay_demo(
+ args.vr_log_path,
+ out_log_path=args.vr_replay_log_path,
+ disable_save=args.disable_save,
+ frame_save_path=args.frame_save_path,
+ mode=args.mode,
+ profile=args.profile,
+ )
+
+
if __name__ == "__main__":
main()
diff --git a/igibson/examples/behavior/behavior_demo_replay_rl.py b/igibson/examples/behavior/behavior_demo_replay_rl.py
new file mode 100644
index 000000000..c380721e4
--- /dev/null
+++ b/igibson/examples/behavior/behavior_demo_replay_rl.py
@@ -0,0 +1,279 @@
+"""
+BEHAVIOR RL episodes replay entrypoint
+"""
+
+import argparse
+import datetime
+import os
+import pprint
+
+import bddl
+import h5py
+import numpy as np
+
+import igibson
+from igibson.activity.activity_base import iGBEHAVIORActivityInstance
+from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
+from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
+from igibson.simulator import Simulator
+from igibson.utils.git_utils import project_git_info
+from igibson.utils.ig_logging import IGLogReader, IGLogWriter
+from igibson.utils.utils import parse_str_config
+
+
+def verify_determinism(in_log_path, out_log_path):
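+ # Compare every logged physics attribute of each object between the original and replayed demos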
+ is_deterministic = True
+ with h5py.File(in_log_path) as original_file, h5py.File(out_log_path) as new_file:
+ for obj in original_file["physics_data"]:
+ for attribute in original_file["physics_data"][obj]:
+ is_close = np.isclose(
+ original_file["physics_data"][obj][attribute], new_file["physics_data"][obj][attribute]
+ ).all()
+ is_deterministic = is_deterministic and is_close
+ if not is_close:
+ print("Mismatch for obj {} with mismatched attribute {}".format(obj, attribute))
+ return bool(is_deterministic)
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Run and collect an ATUS demo")
+ parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log to replay")
+ parser.add_argument(
+ "--vr_replay_log_path", type=str, help="Path (and filename) of file to save replay to (for debugging)"
+ )
+ parser.add_argument(
+ "--frame_save_path",
+ type=str,
+ help="Path to save frames (frame number added automatically, as well as .jpg extension)",
+ )
+ parser.add_argument(
+ "--disable_save",
+ action="store_true",
+ help="Whether to disable saving log of replayed trajectory, used for validation.",
+ )
+ parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.")
+ parser.add_argument(
+ "--mode",
+ type=str,
+ choices=["headless", "vr", "simple"],
+ help="Whether to disable replay through VR and use iggui instead.",
+ )
+ return parser.parse_args()
+
+
+def replay_demo(
+ in_log_path,
+ out_log_path=None,
+ disable_save=False,
+ frame_save_path=None,
+ verbose=True,
+ mode="headless",
+ start_callbacks=[],
+ step_callbacks=[],
+ end_callbacks=[],
+ profile=False,
+):
+ """
+ Replay a BEHAVIOR demo.
+
+ Note that this returns, but does not check for determinism. Use safe_replay_demo to assert for determinism
+ when using in scenarios where determinism is important.
+
+ @param in_log_path: the path of the BEHAVIOR demo log to replay.
+ @param out_log_path: the path of the new BEHAVIOR demo log to save from the replay.
+ @param frame_save_path: the path to save frame images to. None to disable frame image saving.
+ @param mode: which rendering mode ("headless", "simple", "vr"). In simple mode, the demo will be replayed with simple robot view.
+ @param disable_save: Whether saving the replay as a BEHAVIOR demo log should be disabled.
+ @param profile: Whether the replay should be profiled, with profiler output to stdout.
+ @param start_callback: A callback function that will be called immediately before starting to replay steps. Should
+ take two arguments: an iGBEHAVIORActivityInstance and an IGLogReader.
+ @param step_callback: A callback function that will be called immediately following each replayed step. Should
+ take two arguments: an iGBEHAVIORActivityInstance and an IGLogReader.
+ @param end_callback: A callback function that will be called when replay has finished. Should take two arguments:
+ an iGBEHAVIORActivityInstance and an IGLogReader.
+ @return if disable_save is True, returns None. Otherwise, returns a boolean indicating if replay was deterministic.
+ """
+ # HDR files for PBR rendering
+ hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
+ hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
+ light_modulation_map_filename = os.path.join(
+ igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
+ )
+ background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
+
+ # VR rendering settings
+ vr_rendering_settings = MeshRendererSettings(
+ optimized=True,
+ fullscreen=False,
+ env_texture_filename=hdr_texture,
+ env_texture_filename2=hdr_texture2,
+ env_texture_filename3=background_texture,
+ light_modulation_map_filename=light_modulation_map_filename,
+ enable_shadow=True,
+ enable_pbr=True,
+ msaa=False,
+ light_dimming_factor=1.0,
+ )
+
+ # Check mode
+ assert mode in ["headless", "vr", "simple"]
+
+ # Initialize settings to save action replay frames
+ vr_settings = VrSettings(config_str=IGLogReader.read_metadata_attr(in_log_path, "/metadata/vr_settings"))
+ vr_settings.set_frame_save_path(frame_save_path)
+
+ task = IGLogReader.read_metadata_attr(in_log_path, "/metadata/atus_activity")
+ task_id = IGLogReader.read_metadata_attr(in_log_path, "/metadata/activity_definition")
+ scene = IGLogReader.read_metadata_attr(in_log_path, "/metadata/scene_id")
+ physics_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/physics_timestep")
+ render_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/render_timestep")
+ filter_objects = IGLogReader.read_metadata_attr(in_log_path, "/metadata/filter_objects")
+
+ logged_git_info = IGLogReader.read_metadata_attr(in_log_path, "/metadata/git_info")
+ logged_git_info = parse_str_config(logged_git_info)
+ git_info = project_git_info()
+ pp = pprint.PrettyPrinter(indent=4)
+
+ for key in logged_git_info.keys():
+ logged_git_info[key].pop("directory", None)
+ git_info[key].pop("directory", None)
+ if logged_git_info[key] != git_info[key] and verbose:
+            print("Warning: git commit mismatch for repo {}. This may affect deterministic replay.".format(key))
+ print("Logged git info:\n")
+ pp.pprint(logged_git_info[key])
+ print("Current git info:\n")
+ pp.pprint(git_info[key])
+
+ # VR system settings
+ s = Simulator(
+ mode=mode,
+ physics_timestep=physics_timestep,
+ render_timestep=render_timestep,
+ rendering_settings=vr_rendering_settings,
+ vr_settings=vr_settings,
+ image_width=1280,
+ image_height=720,
+ )
+
+ igtn_task = iGBEHAVIORActivityInstance(task, task_id)
+ igtn_task.initialize_simulator(
+ simulator=s,
+ scene_id=scene,
+ scene_kwargs={
+ "urdf_file": "{}_neurips_task_{}_{}_0_fixed_furniture".format(scene, task, task_id),
+ },
+ load_clutter=True,
+ online_sampling=False,
+ )
+ vr_agent = igtn_task.simulator.robots[0]
+ vr_agent.activate()
+ igtn_task.reset_scene(snapshot_id=igtn_task.initial_state)
+ # set the constraints to the current poses
+ vr_agent.apply_action(np.zeros(28))
+
+ if not in_log_path:
+ raise RuntimeError("Must provide a VR log path to run action replay!")
+ log_reader = IGLogReader(in_log_path, log_status=False)
+
+ log_writer = None
+ if not disable_save:
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+        if out_log_path is None:
+ out_log_path = "{}_{}_{}_{}_replay.hdf5".format(task, task_id, scene, timestamp)
+
+ log_writer = IGLogWriter(
+ s,
+ log_filepath=out_log_path,
+ task=igtn_task,
+ store_vr=False,
+ vr_robot=vr_agent,
+ profiling_mode=profile,
+ filter_objects=filter_objects,
+ )
+ log_writer.set_up_data_storage()
+
+ for callback in start_callbacks:
+ callback(igtn_task, log_reader)
+
+ task_done = False
+ while log_reader.get_data_left_to_read():
+
+ action = log_reader.get_agent_action("vr_robot")
+ # Get relevant VR action data and update VR agent
+ vr_agent.apply_action(action)
+
+ if not disable_save:
+ log_writer.process_frame()
+
+ igtn_task.simulator.step(print_stats=profile)
+ task_done, _ = igtn_task.check_success()
+
+ # Set camera each frame
+ if mode == "vr":
+ log_reader.set_replay_camera(s)
+
+ for callback in step_callbacks:
+ callback(igtn_task, log_reader)
+
+ # Per-step determinism check. Activate if necessary.
+ # things_to_compare = [thing for thing in log_writer.name_path_data if thing[0] == "physics_data"]
+ # for thing in things_to_compare:
+ # thing_path = "/".join(thing)
+ # fc = log_reader.frame_counter % log_writer.frames_before_write
+ # if fc == log_writer.frames_before_write - 1:
+ # continue
+ # replayed = log_writer.get_data_for_name_path(thing)[fc]
+ # original = log_reader.read_value(thing_path)
+ # if not np.all(replayed == original):
+ # print("%s not equal in %d" % (thing_path, log_reader.frame_counter))
+ # if not np.isclose(replayed, original).all():
+ # print("%s not close in %d" % (thing_path, log_reader.frame_counter))
+
+    print("Demo was successfully completed: ", task_done)
+
+ demo_statistics = {}
+ for callback in end_callbacks:
+ callback(igtn_task, log_reader)
+
+ s.disconnect()
+
+ is_deterministic = None
+ if not disable_save:
+ log_writer.end_log_session()
+ is_deterministic = verify_determinism(in_log_path, out_log_path)
+ print("Demo was deterministic: ", is_deterministic)
+
+ demo_statistics = {
+ "deterministic": is_deterministic,
+ "task": task,
+ "task_id": int(task_id),
+ "scene": scene,
+ "task_done": task_done,
+ "total_frame_num": log_reader.total_frame_num,
+ }
+ return demo_statistics
+
+
+def safe_replay_demo(*args, **kwargs):
+ """Replays a demo, asserting that it was deterministic."""
+ demo_statistics = replay_demo(*args, **kwargs)
+ assert (
+        demo_statistics["deterministic"] is True
+ ), "Replay was not deterministic (or was executed with disable_save=True)."
+
+
+def main():
+ args = parse_args()
+ bddl.set_backend("iGibson")
+ replay_demo(
+ args.vr_log_path,
+ out_log_path=args.vr_replay_log_path,
+ disable_save=args.disable_save,
+ frame_save_path=args.frame_save_path,
+ mode=args.mode,
+ profile=args.profile,
+ )
+
+
+if __name__ == "__main__":
+ main()
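
For reference, a minimal usage sketch of the callback API documented in replay_demo above. This is an editor-added illustration, not part of the patch: the demo path and the print_task_done callback are hypothetical, and callbacks receive the iGBEHAVIORActivityInstance and the IGLogReader, exactly as they are invoked in the replay loop.

import bddl

from igibson.examples.behavior import behavior_demo_replay


def print_task_done(igbhvr_act_inst, _log_reader):
    # Called after every replayed step with the activity instance and the log reader.
    done, _ = igbhvr_act_inst.check_success()
    print("Task done so far:", done)


bddl.set_backend("iGibson")
stats = behavior_demo_replay.replay_demo(
    "some_demo.hdf5",  # hypothetical path to a recorded BEHAVIOR demo
    disable_save=True,  # skip writing a replay log; no determinism check is performed
    mode="headless",
    step_callbacks=[print_task_done],
)
print(stats["task_done"], stats["total_frame_num"])
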
diff --git a/igibson/examples/behavior/behavior_demo_segmentation.py b/igibson/examples/behavior/behavior_demo_segmentation.py
new file mode 100644
index 000000000..8c55dac87
--- /dev/null
+++ b/igibson/examples/behavior/behavior_demo_segmentation.py
@@ -0,0 +1,471 @@
+import argparse
+import json
+import os
+from collections import deque, namedtuple
+from enum import Enum
+
+import bddl
+import printree
+import pyinstrument
+
+import igibson
+from igibson import object_states
+from igibson.activity.activity_base import iGBEHAVIORActivityInstance
+from igibson.activity.bddl_backend import ObjectStateBinaryPredicate, ObjectStateUnaryPredicate
+from igibson.examples.behavior import behavior_demo_replay
+from igibson.object_states import ROOM_STATES, factory
+from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState, RelativeObjectState
+from igibson.robots.behavior_robot import BRBody
+
+StateRecord = namedtuple("StateRecord", ["state_type", "objects", "value"])
+StateEntry = namedtuple("StateEntry", ["frame_count", "state_records"])
+Segment = namedtuple("Segment", ["start", "duration", "end", "state_records", "sub_segments"])
+
+
+class SegmentationObjectSelection(Enum):
+ ALL_OBJECTS = 1
+ TASK_RELEVANT_OBJECTS = 2
+ ROBOTS = 3
+
+
+class SegmentationStateSelection(Enum):
+ ALL_STATES = 1
+ GOAL_CONDITION_RELEVANT_STATES = 2
+
+
+class SegmentationStateDirection(Enum):
+ BOTH_DIRECTIONS = 1
+ FALSE_TO_TRUE = 2
+ TRUE_TO_FALSE = 3
+
+
+STATE_DIRECTIONS = {
+ # Note that some of these states already only go False-to-True so they are left as BOTH_DIRECTIONS
+ # so as not to add filtering work.
+ object_states.Burnt: SegmentationStateDirection.BOTH_DIRECTIONS,
+ object_states.Cooked: SegmentationStateDirection.BOTH_DIRECTIONS,
+ object_states.Dusty: SegmentationStateDirection.BOTH_DIRECTIONS,
+ object_states.Frozen: SegmentationStateDirection.BOTH_DIRECTIONS,
+ object_states.InFOVOfRobot: SegmentationStateDirection.FALSE_TO_TRUE,
+ object_states.InHandOfRobot: SegmentationStateDirection.FALSE_TO_TRUE,
+ object_states.InReachOfRobot: SegmentationStateDirection.FALSE_TO_TRUE,
+ object_states.InSameRoomAsRobot: SegmentationStateDirection.FALSE_TO_TRUE,
+ object_states.Inside: SegmentationStateDirection.FALSE_TO_TRUE,
+ object_states.NextTo: SegmentationStateDirection.FALSE_TO_TRUE,
+ # OnFloor: SegmentationStateDirection.FALSE_TO_TRUE,
+ object_states.OnTop: SegmentationStateDirection.FALSE_TO_TRUE,
+ object_states.Open: SegmentationStateDirection.BOTH_DIRECTIONS,
+ object_states.Sliced: SegmentationStateDirection.BOTH_DIRECTIONS,
+ object_states.Soaked: SegmentationStateDirection.BOTH_DIRECTIONS,
+ object_states.Stained: SegmentationStateDirection.BOTH_DIRECTIONS,
+ object_states.ToggledOn: SegmentationStateDirection.BOTH_DIRECTIONS,
+ # Touching: SegmentationStateDirection.BOTH_DIRECTIONS,
+ object_states.Under: SegmentationStateDirection.FALSE_TO_TRUE,
+}
+STATE_DIRECTIONS.update({state: SegmentationStateDirection.FALSE_TO_TRUE for state in ROOM_STATES})
+
+ALLOWED_SUB_SEGMENTS_BY_STATE = {
+ object_states.Burnt: {object_states.OnTop, object_states.ToggledOn, object_states.Open, object_states.Inside},
+ object_states.Cooked: {object_states.OnTop, object_states.ToggledOn, object_states.Open, object_states.Inside},
+ object_states.Dusty: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
+ object_states.Frozen: {
+ object_states.InReachOfRobot,
+ object_states.OnTop,
+ object_states.ToggledOn,
+ object_states.Open,
+ object_states.Inside,
+ },
+ object_states.InFOVOfRobot: {},
+ object_states.InHandOfRobot: {},
+ object_states.InReachOfRobot: {},
+ object_states.InSameRoomAsRobot: {},
+ object_states.Inside: {
+ object_states.Open,
+ object_states.InSameRoomAsRobot,
+ object_states.InReachOfRobot,
+ object_states.InHandOfRobot,
+ },
+ object_states.NextTo: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
+ # OnFloor: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
+ object_states.OnTop: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
+ object_states.Open: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
+ object_states.Sliced: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
+ object_states.Soaked: {
+ object_states.ToggledOn,
+ object_states.InSameRoomAsRobot,
+ object_states.InReachOfRobot,
+ object_states.InHandOfRobot,
+ },
+ object_states.Stained: {
+ object_states.Soaked,
+ object_states.InSameRoomAsRobot,
+ object_states.InReachOfRobot,
+ object_states.InHandOfRobot,
+ },
+ object_states.ToggledOn: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot},
+ # Touching: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
+ object_states.Under: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
+}
+
+
+def process_states(objects, state_types):
+ predicate_states = set()
+
+ for obj in objects:
+ for state_type in state_types:
+ if state_type not in obj.states:
+ continue
+
+ assert issubclass(state_type, BooleanState)
+ state = obj.states[state_type]
+ if isinstance(state, AbsoluteObjectState):
+ # Add only one instance of absolute state
+ try:
+ value = bool(state.get_value())
+ record = StateRecord(state_type, (obj,), value)
+ predicate_states.add(record)
+ except ValueError:
+ pass
+ elif isinstance(state, RelativeObjectState):
+ # Add one instance per state pair
+ for other in objects:
+ try:
+ value = state.get_value(other)
+ record = StateRecord(state_type, (obj, other), value)
+ predicate_states.add(record)
+ except ValueError:
+ pass
+ else:
+ raise ValueError("Unusable state for segmentation.")
+
+ return predicate_states
+
+
+def _get_goal_condition_states(igbhvr_act_inst: iGBEHAVIORActivityInstance):
+ state_types = set()
+
+ q = deque()
+ q.extend(igbhvr_act_inst.goal_conditions)
+
+ while q:
+ pred = q.popleft()
+ if isinstance(pred, ObjectStateUnaryPredicate) or isinstance(pred, ObjectStateBinaryPredicate):
+ state_types.add(pred.STATE_CLASS)
+
+ q.extend(pred.children)
+
+ return state_types
+
+
+class DemoSegmentationProcessor(object):
+ def __init__(
+ self,
+ state_types=None,
+ object_selection=SegmentationObjectSelection.TASK_RELEVANT_OBJECTS,
+ label_by_instance=False,
+ hierarchical=False,
+ diff_initial=False,
+ state_directions=STATE_DIRECTIONS,
+ profiler=None,
+ ):
+ self.state_history = []
+ self.last_state = None
+
+ self.state_types_option = state_types
+ self.state_types = None # To be populated in initialize().
+ self.state_directions = state_directions
+ self.object_selection = object_selection
+ self.label_by_instance = label_by_instance
+
+ self.hierarchical = hierarchical
+ self.all_state_types = None
+
+ if diff_initial:
+ self.state_history.append(StateEntry(0, set()))
+ self.last_state = set()
+
+ self.profiler = profiler
+
+ def start_callback(self, igbhvr_act_inst, _):
+ self.all_state_types = [
+ state
+ for state in factory.get_all_states()
+ if (
+ issubclass(state, BooleanState)
+ and (issubclass(state, AbsoluteObjectState) or issubclass(state, RelativeObjectState))
+ )
+ ]
+
+ if isinstance(self.state_types_option, list) or isinstance(self.state_types_option, set):
+ self.state_types = self.state_types_option
+ elif self.state_types_option == SegmentationStateSelection.ALL_STATES:
+ self.state_types = self.all_state_types
+ elif self.state_types_option == SegmentationStateSelection.GOAL_CONDITION_RELEVANT_STATES:
+ self.state_types = _get_goal_condition_states(igbhvr_act_inst)
+ else:
+ raise ValueError("Unknown segmentation state selection.")
+
+ def step_callback(self, igbhvr_act_inst, _):
+ if self.profiler:
+ self.profiler.start()
+
+ if self.object_selection == SegmentationObjectSelection.TASK_RELEVANT_OBJECTS:
+ objects = [obj for obj in igbhvr_act_inst.object_scope.values() if not isinstance(obj, BRBody)]
+ elif self.object_selection == SegmentationObjectSelection.ROBOTS:
+ objects = [obj for obj in igbhvr_act_inst.object_scope.values() if isinstance(obj, BRBody)]
+ elif self.object_selection == SegmentationObjectSelection.ALL_OBJECTS:
+ objects = igbhvr_act_inst.simulator.scene.get_objects()
+ else:
+ raise ValueError("Incorrect SegmentationObjectSelection %r" % self.object_selection)
+
+ # Get the processed state.
+ state_types_to_use = self.state_types if not self.hierarchical else self.all_state_types
+ processed_state = process_states(objects, state_types_to_use)
+ if self.last_state is None or (processed_state - self.last_state):
+ self.state_history.append(StateEntry(igbhvr_act_inst.simulator.frame_count, processed_state))
+
+ self.last_state = processed_state
+
+ if self.profiler:
+ self.profiler.stop()
+
+ def obj2str(self, obj):
+ return obj.name if self.label_by_instance else obj.category
+
+ def _hierarchical_segments(self, state_entries, state_types):
+ if not state_types:
+ return []
+
+ segments = []
+ before_idx = 0
+ after_idx = 1
+
+ # Keep iterating until we reach the end of our state entries.
+ while after_idx < len(state_entries):
+ # Get the state entries at these keys.
+ before = state_entries[before_idx]
+ after = state_entries[after_idx]
+
+ # Check if there is a valid diff at this range.
+ diffs = self.filter_diffs(after.state_records - before.state_records, state_types)
+ if diffs is not None:
+ # If there is a diff, prepare to do sub-segmentation on the segment.
+ sub_segment_states = set()
+ if self.hierarchical:
+ for state_record in diffs:
+ corresponding_sub_states = ALLOWED_SUB_SEGMENTS_BY_STATE[state_record.state_type]
+ sub_segment_states.update(corresponding_sub_states)
+
+ sub_segments = self._hierarchical_segments(
+ state_entries[before_idx : after_idx + 1], sub_segment_states
+ )
+ segments.append(
+ Segment(
+ before.frame_count,
+ after.frame_count - before.frame_count,
+ after.frame_count,
+ diffs,
+ sub_segments,
+ )
+ )
+
+ # Continue segmentation by moving the before_idx to start here.
+ before_idx = after_idx
+
+ # Increase the range of elements we're looking at by one.
+ after_idx += 1
+
+ return segments
+
+ def get_segments(self):
+ segments = self._hierarchical_segments(self.state_history, self.state_types)
+ return Segment(segments[0].start, segments[-1].end - segments[0].start, segments[-1].end, [], segments)
+
+ def filter_diffs(self, state_records, state_types):
+        """Filter state records, keeping only changes that match the configured direction for each state type."""
+ new_records = set()
+
+ # Go through the records in the segment.
+ for state_record in state_records:
+ # Check if the state type is on our list
+ if state_record.state_type not in state_types:
+ continue
+
+            # Check whether the value change matches the allowed direction for this state type.
+ mode = self.state_directions[state_record.state_type]
+ accept = True
+ if mode == SegmentationStateDirection.FALSE_TO_TRUE:
+ accept = state_record.value
+ elif mode == SegmentationStateDirection.TRUE_TO_FALSE:
+ accept = not state_record.value
+
+            # Keep the record if its direction of change is accepted.
+ if accept:
+ new_records.add(state_record)
+
+ # If we haven't kept any of this segment's records, drop the segment.
+ if not new_records:
+ return None
+
+ return new_records
+
+ def _serialize_segment(self, segment):
+ stringified_entries = [
+ {
+ "name": state_record.state_type.__name__,
+ "objects": [self.obj2str(obj) for obj in state_record.objects],
+ "value": state_record.value,
+ }
+ for state_record in segment.state_records
+ ]
+
+ return {
+ "start": segment.start,
+ "end": segment.end,
+ "duration": segment.duration,
+ "state_records": stringified_entries,
+ "sub_segments": [self._serialize_segment(sub_segment) for sub_segment in segment.sub_segments],
+ }
+
+ def _segment_to_dict_tree(self, segment, output_dict):
+ stringified_entries = [
+ (
+ state_record.state_type.__name__,
+ ", ".join(obj.category for obj in state_record.objects),
+ state_record.value,
+ )
+ for state_record in segment.state_records
+ ]
+
+ entry_strs = ["%s(%r) = %r" % entry for entry in stringified_entries]
+ key = "%d-%d: %s" % (segment.start, segment.end, ", ".join(entry_strs))
+ sub_segments = {}
+ for sub in segment.sub_segments:
+ self._segment_to_dict_tree(sub, sub_segments)
+ output_dict[key] = sub_segments
+
+ def serialize_segments(self):
+        # Make the root call to the recursive serialization function.
+ return self._serialize_segment(self.get_segments())
+
+ def __str__(self):
+ out = ""
+ out += "---------------------------------------------------\n"
+ out += "Segmentation of %s\n" % self.object_selection.name
+ out += "Considered states: %s\n" % ", ".join(x.__name__ for x in self.state_types)
+ out += "---------------------------------------------------\n"
+ output = {}
+ self._segment_to_dict_tree(self.get_segments(), output)
+ out += printree.ftree(output) + "\n"
+ out += "---------------------------------------------------\n"
+ return out
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Run segmentation on an ATUS demo.")
+ parser.add_argument(
+ "--log_path", type=str, help="Path (and filename) of log to replay. If empty, test demo will be used."
+ )
+ parser.add_argument(
+ "--out_dir", type=str, help="Directory to store results in. If empty, test directory will be used."
+ )
+ parser.add_argument(
+ "--profile",
+ action="store_true",
+ help="Whether to profile the segmentation, outputting a profile HTML in the out path.",
+ )
+ return parser.parse_args()
+
+
+def get_default_segmentation_processors(profiler=None):
+    # This applies a "flat" segmentation (i.e. not hierarchical) using only the states supported by our magic motion
+ # primitives.
+ flat_states = [
+ object_states.Open,
+ object_states.OnTop,
+ object_states.Inside,
+ object_states.InHandOfRobot,
+ object_states.InReachOfRobot,
+ ]
+ flat_object_segmentation = DemoSegmentationProcessor(
+ flat_states, SegmentationObjectSelection.TASK_RELEVANT_OBJECTS, label_by_instance=True, profiler=profiler
+ )
+
+ # This applies a hierarchical segmentation based on goal condition states. It's WIP and currently unused.
+ goal_segmentation = DemoSegmentationProcessor(
+ SegmentationStateSelection.GOAL_CONDITION_RELEVANT_STATES,
+ SegmentationObjectSelection.TASK_RELEVANT_OBJECTS,
+ hierarchical=True,
+ label_by_instance=True,
+ profiler=profiler,
+ )
+
+ # This applies a flat segmentation that allows us to see what room the agent is in during which frames.
+ room_presence_segmentation = DemoSegmentationProcessor(
+ ROOM_STATES, SegmentationObjectSelection.ROBOTS, diff_initial=True, profiler=profiler
+ )
+
+ return {
+ # "goal": goal_segmentation,
+ "flat": flat_object_segmentation,
+ "room": room_presence_segmentation,
+ }
+
+
+def main():
+ bddl.set_backend("iGibson")
+ args = parse_args()
+
+ # Select the demo to apply segmentation on.
+ demo_file = os.path.join(igibson.ig_dataset_path, "tests", "cleaning_windows_0_Rs_int_2021-05-23_23-11-46.hdf5")
+ if args.log_path:
+ demo_file = args.log_path
+
+ # Select the output directory.
+ out_dir = os.path.join(igibson.ig_dataset_path, "tests", "segmentation_results")
+ if args.out_dir:
+ out_dir = args.out_dir
+
+ # Create output directory if needed.
+ if not os.path.exists(out_dir):
+ os.mkdir(out_dir)
+
+ # Set up the profiler
+ profiler = None
+ if args.profile:
+ profiler = pyinstrument.Profiler()
+
+ # Create default segmentation processors.
+ segmentation_processors = get_default_segmentation_processors(profiler)
+
+ # Run the segmentations.
+ behavior_demo_replay.safe_replay_demo(
+ demo_file,
+ start_callbacks=[sp.start_callback for sp in segmentation_processors.values()],
+ step_callbacks=[sp.step_callback for sp in segmentation_processors.values()],
+ )
+
+ demo_basename = os.path.splitext(os.path.basename(demo_file))[0]
+ for segmentation_name, segmentation_processor in segmentation_processors.items():
+ json_file = "%s_%s.json" % (demo_basename, segmentation_name)
+ json_fullpath = os.path.join(out_dir, json_file)
+ with open(json_fullpath, "w") as f:
+ json.dump(segmentation_processor.serialize_segments(), f)
+
+ # Print the segmentations.
+ combined_output = ""
+ for segmentation_processor in segmentation_processors.values():
+ combined_output += str(segmentation_processor) + "\n"
+
+ print(combined_output)
+
+ # Save profiling information.
+ if args.profile:
+ html = profiler.output_html()
+ html_path = os.path.join(out_dir, "segmentation_profile.html")
+ with open(html_path, "w") as f:
+ f.write(html)
+
+
+if __name__ == "__main__":
+ main()
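
For reference, a minimal sketch of wiring a single custom DemoSegmentationProcessor into the replay pipeline, mirroring get_default_segmentation_processors above. This is an editor-added illustration, not part of the patch, and the demo path is hypothetical.

import bddl

from igibson import object_states
from igibson.examples.behavior import behavior_demo_replay
from igibson.examples.behavior.behavior_demo_segmentation import (
    DemoSegmentationProcessor,
    SegmentationObjectSelection,
)

bddl.set_backend("iGibson")

# Segment only on open/close and grasp-related state changes of task-relevant objects.
processor = DemoSegmentationProcessor(
    state_types=[object_states.Open, object_states.InHandOfRobot],
    object_selection=SegmentationObjectSelection.TASK_RELEVANT_OBJECTS,
    label_by_instance=True,
)

behavior_demo_replay.safe_replay_demo(
    "some_demo.hdf5",  # hypothetical demo path
    start_callbacks=[processor.start_callback],
    step_callbacks=[processor.step_callback],
)

print(processor)  # prints the segment tree produced during replay
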
diff --git a/igibson/examples/behavior/behavior_demo_segmentation_batch.py b/igibson/examples/behavior/behavior_demo_segmentation_batch.py
new file mode 100644
index 000000000..eda76d778
--- /dev/null
+++ b/igibson/examples/behavior/behavior_demo_segmentation_batch.py
@@ -0,0 +1,39 @@
+import argparse
+
+from igibson.examples.behavior.behavior_demo_batch import behavior_demo_batch
+from igibson.examples.behavior.behavior_demo_segmentation import get_default_segmentation_processors
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Collect metrics from BEHAVIOR demos in manifest.")
+ parser.add_argument("demo_root", type=str, help="Directory containing demos listed in the manifest.")
+ parser.add_argument("log_manifest", type=str, help="Plain text file consisting of list of demos to replay.")
+ parser.add_argument("out_dir", type=str, help="Directory to store results in.")
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+
+ def get_segmentation_callbacks():
+ # Create default segmentation processors.
+ segmentation_processors = get_default_segmentation_processors()
+
+ # Create a data callback that unifies the results from the
+ # segmentation processors.
+ def data_callback():
+ return {"segmentations": {name: sp.serialize_segments() for name, sp in segmentation_processors.items()}}
+
+ # Return all of the callbacks for a particular demo.
+ return (
+ [sp.start_callback for sp in segmentation_processors.values()],
+ [sp.step_callback for sp in segmentation_processors.values()],
+ [],
+ [data_callback],
+ )
+
+ behavior_demo_batch(args.demo_root, args.log_manifest, args.out_dir, get_segmentation_callbacks)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/behavior/behavior_env_metrics.py b/igibson/examples/behavior/behavior_env_metrics.py
new file mode 100644
index 000000000..307f6f818
--- /dev/null
+++ b/igibson/examples/behavior/behavior_env_metrics.py
@@ -0,0 +1,97 @@
+import argparse
+import json
+import os
+
+import bddl
+
+import igibson
+from igibson.envs.behavior_env import BehaviorEnv
+from igibson.metrics.agent import BehaviorRobotMetric, FetchRobotMetric
+from igibson.metrics.disarrangement import KinematicDisarrangement, LogicalDisarrangement
+from igibson.metrics.task import TaskMetric
+
+
+def get_metrics_callbacks(config):
+ metrics = [
+ KinematicDisarrangement(),
+ LogicalDisarrangement(),
+ TaskMetric(),
+ ]
+
+ robot_type = config["robot"]
+ if robot_type == "FetchGripper":
+ metrics.append(FetchRobotMetric())
+ elif robot_type == "BehaviorRobot":
+ metrics.append(BehaviorRobotMetric())
+ else:
+        raise ValueError("Metrics only implemented for FetchGripper and BehaviorRobot")
+
+ return (
+ [metric.start_callback for metric in metrics],
+ [metric.step_callback for metric in metrics],
+ [metric.end_callback for metric in metrics],
+ [metric.gather_results for metric in metrics],
+ )
+
+
+if __name__ == "__main__":
+
+ bddl.set_backend("iGibson")
+
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--config",
+ "-c",
+ default=os.path.join(igibson.root_path, "examples", "configs", "behavior_onboard_sensing_fetch.yaml"),
+        help="which config file to use [default: behavior_onboard_sensing_fetch.yaml in examples/configs]",
+ )
+ parser.add_argument(
+ "--mode",
+ "-m",
+ choices=["headless", "simple", "gui", "iggui", "pbgui"],
+ default="simple",
+        help="which mode for simulation (default: simple)",
+ )
+ args = parser.parse_args()
+
+ env = BehaviorEnv(
+ config_file=args.config,
+ mode=args.mode,
+ action_timestep=1.0 / 10.0,
+ physics_timestep=1.0 / 120.0,
+ )
+
+ start_callbacks, step_callbacks, end_callbacks, data_callbacks = get_metrics_callbacks(env.config)
+
+ per_episode_metrics = {}
+ for callback in start_callbacks:
+ callback(env.task, None)
+
+ for episode in range(10):
+ env.reset()
+ for i in range(1000):
+ action = env.action_space.sample()
+ state, reward, done, _ = env.step(action)
+ for callback in step_callbacks:
+ callback(env.task, None)
+ if done:
+ break
+
+ for callback in end_callbacks:
+ callback(env.task, None)
+
+ metrics_summary = {}
+
+ for callback in data_callbacks:
+ metrics_summary.update(callback())
+
+ per_episode_metrics[episode] = metrics_summary
+
+ log_path = "my_igibson_run.json"
+
+ with open(log_path, "w") as file:
+ json.dump(per_episode_metrics, file)
+
+ env.close()
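
A short, editor-added sketch (not part of the patch) of plugging get_metrics_callbacks into the batch replay utility, since it returns the same (start, step, end, data) callback tuple that behavior_demo_batch consumes in behavior_demo_segmentation_batch.py. The demo directory, manifest, and output paths are hypothetical, and this assumes behavior_demo_batch calls the callback factory with no arguments, as it does there.

import bddl

from igibson.examples.behavior.behavior_demo_batch import behavior_demo_batch
from igibson.examples.behavior.behavior_env_metrics import get_metrics_callbacks

bddl.set_backend("iGibson")

# Only the robot type is read from the config by get_metrics_callbacks.
config = {"robot": "BehaviorRobot"}


def get_callbacks():
    # Returns (start_callbacks, step_callbacks, end_callbacks, data_callbacks).
    return get_metrics_callbacks(config)


behavior_demo_batch("demos/", "manifest.csv", "metrics_out/", get_callbacks)
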
diff --git a/igibson/examples/behavior/behavior_generate_batch_manifest.py b/igibson/examples/behavior/behavior_generate_batch_manifest.py
new file mode 100644
index 000000000..f28a750fd
--- /dev/null
+++ b/igibson/examples/behavior/behavior_generate_batch_manifest.py
@@ -0,0 +1,32 @@
+import argparse
+import glob
+import os
+
+import numpy as np
+import pandas as pd
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Script to generate manifest for batch replay demos")
+ parser.add_argument(
+        "--demo_directory", type=str, required=True, help="Directory containing the demo HDF5 files to include in the manifest"
+ )
+ parser.add_argument(
+        "--split", type=int, default=0, help="Number of chunks to split the manifest into for distributed replay"
+ )
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+ demos = glob.glob(os.path.join(args.demo_directory, "*.hdf5"))
+ filtered_demos = [demo for demo in demos if "replay" not in demo]
+ series = {"demos": filtered_demos}
+ pd.DataFrame(series).to_csv("manifest.csv")
+ if args.split > 1:
+ for idx, split in enumerate(np.array_split(filtered_demos, args.split)):
+ pd.DataFrame({"demos": split}).to_csv("manifest_{}.csv".format(idx))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/behavior/behavior_robot_related_states_demo.py b/igibson/examples/behavior/behavior_robot_related_states_demo.py
new file mode 100644
index 000000000..74990e723
--- /dev/null
+++ b/igibson/examples/behavior/behavior_robot_related_states_demo.py
@@ -0,0 +1,51 @@
+import os
+
+import bddl
+
+import igibson
+from igibson import object_states
+from igibson.examples.behavior import behavior_demo_replay
+
+bddl.set_backend("iGibson")
+
+
+def robot_states_callback(igbhvr_act_inst, _):
+ window1 = (igbhvr_act_inst.object_scope["window.n.01_1"], "kitchen")
+ window2 = (igbhvr_act_inst.object_scope["window.n.01_2"], "living room")
+ windows = [window1, window2]
+
+ for window, roomname in windows:
+ print(
+ "%s window is inFOV: %r, inSameRoom: %r, inReach: %r"
+ % (
+ roomname,
+ window.states[object_states.InFOVOfRobot].get_value(),
+ window.states[object_states.InSameRoomAsRobot].get_value(),
+ window.states[object_states.InReachOfRobot].get_value(),
+ )
+ )
+
+ rag = igbhvr_act_inst.object_scope["rag.n.01_1"]
+ print("Rag is in hand: %r" % rag.states[object_states.InHandOfRobot].get_value())
+
+ agent = igbhvr_act_inst.object_scope["agent.n.01_1"]
+ print(
+ "Agent is in kitchen: %r, living room: %r, bedroom: %r."
+ % (
+ agent.states[object_states.IsInKitchen].get_value(),
+ agent.states[object_states.IsInLivingRoom].get_value(),
+ agent.states[object_states.IsInBedroom].get_value(),
+ )
+ )
+
+
+def main():
+ DEMO_FILE = os.path.join(igibson.ig_dataset_path, "tests", "cleaning_windows_0_Rs_int_2021-05-23_23-11-46.hdf5")
+
+ behavior_demo_replay.replay_demo(
+ DEMO_FILE, disable_save=True, step_callbacks=[robot_states_callback], mode="headless"
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/configs/behavior_full_observability.yaml b/igibson/examples/configs/behavior_full_observability.yaml
similarity index 51%
rename from igibson/configs/behavior_full_observability.yaml
rename to igibson/examples/configs/behavior_full_observability.yaml
index 5b942cada..0fa6d762a 100644
--- a/igibson/configs/behavior_full_observability.yaml
+++ b/igibson/examples/configs/behavior_full_observability.yaml
@@ -1,31 +1,44 @@
# scene
-scene: igibson
scene_id: Beechwood_1_int
clutter: false
build_graph: true
load_texture: true
pybullet_load_texture: true
-should_open_all_doors: false
+should_open_all_doors: true
# domain randomization
texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: BehaviorRobot
+robot: BehaviorRobot
# task
task: cleaning_cupboards
task_id: 0
-instance_id: 0
online_sampling: false
+target_dist_min: 1.0
+target_dist_max: 10.0
+goal_format: polar
+task_obs_dim: 4
+
+# reward
+reward_type: l2
+success_reward: 10.0
+potential_reward_weight: 1.0
+collision_reward_weight: -0.1
# discount factor
discount_factor: 0.99
# termination condition
+dist_tol: 0.36 # body width
max_step: 500
+max_collisions_allowed: 500
+
+# misc config
+initial_pos_z_offset: 0.1
+collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
# sensor spec
output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs]
@@ -41,3 +54,12 @@ depth_high: 10.0
# sensor noise
depth_noise_rate: 0.0
scan_noise_rate: 0.0
+
+# visual objects
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
+
+# speed limit
+hand_threshold: 0.4
+hand_speed: 0.3
+body_speed: 0.3
diff --git a/igibson/examples/configs/behavior_full_observability_fetch.yaml b/igibson/examples/configs/behavior_full_observability_fetch.yaml
new file mode 100644
index 000000000..6dd8d127f
--- /dev/null
+++ b/igibson/examples/configs/behavior_full_observability_fetch.yaml
@@ -0,0 +1,80 @@
+# scene
+scene_id: Beechwood_1_int
+clutter: false
+build_graph: true
+load_texture: true
+pybullet_load_texture: true
+should_open_all_doors: true
+
+# domain randomization
+texture_randomization_freq: null
+object_randomization_freq: null
+
+# robot
+robot: FetchGripper
+use_ag: true
+ag_strict_mode: true
+default_arm_pose: diagonal30
+trunk_offset: 0.085
+controller:
+ mode: pose_delta_ori # options are {pose_delta_ori, position_fixed_ori, position_compliant_ori}
+ kv_vel: -2.0
+ input_max: [1, 1, 1, 1, 1, 1]
+ input_min: [-1, -1, -1, -1, -1, -1]
+ output_max: [0.2, 0.2, 0.2, 0.5, 0.5, 0.5]
+ output_min: [-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]
+ eef_always_in_frame: false # if true, will add hard workspace left-right constraints to prevent EEF from moving off-frame in horizontal directions
+ neutral_xy: [0.25, 0] # (x,y) relative pos values taken in robot base frame from which radius limits will be computed
+ radius_limit: 0.5 # x-y reaching limit
+ height_limits: [0.2, 1.5] # min, max height limits
+
+# task
+task: cleaning_cupboards
+task_id: 0
+online_sampling: false
+target_dist_min: 1.0
+target_dist_max: 10.0
+goal_format: polar
+task_obs_dim: 4
+
+# reward
+reward_type: l2
+success_reward: 10.0
+potential_reward_weight: 1.0
+collision_reward_weight: -0.1
+
+# discount factor
+discount_factor: 0.99
+
+# termination condition
+dist_tol: 0.36 # body width
+max_step: 500
+max_collisions_allowed: 500
+
+# misc config
+initial_pos_z_offset: 0.1
+collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
+
+# sensor spec
+output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs]
+# image
+fisheye: false
+image_width: 512
+image_height: 512
+vertical_fov: 120
+# depth
+depth_low: 0.0
+depth_high: 10.0
+
+# sensor noise
+depth_noise_rate: 0.0
+scan_noise_rate: 0.0
+
+# visual objects
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
+
+# speed limit
+hand_threshold: 0.4
+hand_speed: 0.3
+body_speed: 0.3
diff --git a/igibson/configs/behavior_onboard_sensing.yaml b/igibson/examples/configs/behavior_onboard_sensing.yaml
similarity index 51%
rename from igibson/configs/behavior_onboard_sensing.yaml
rename to igibson/examples/configs/behavior_onboard_sensing.yaml
index 554c3fea2..90a6cfd95 100644
--- a/igibson/configs/behavior_onboard_sensing.yaml
+++ b/igibson/examples/configs/behavior_onboard_sensing.yaml
@@ -1,31 +1,44 @@
# scene
-scene: igibson
scene_id: Beechwood_1_int
clutter: false
build_graph: true
load_texture: true
pybullet_load_texture: true
-should_open_all_doors: false
+should_open_all_doors: true
# domain randomization
texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: BehaviorRobot
+robot: BehaviorRobot
# task
task: cleaning_cupboards
task_id: 0
-instance_id: 0
online_sampling: false
+target_dist_min: 1.0
+target_dist_max: 10.0
+goal_format: polar
+task_obs_dim: 4
+
+# reward
+reward_type: l2
+success_reward: 10.0
+potential_reward_weight: 1.0
+collision_reward_weight: -0.1
# discount factor
discount_factor: 0.99
# termination condition
+dist_tol: 0.36 # body width
max_step: 500
+max_collisions_allowed: 500
+
+# misc config
+initial_pos_z_offset: 0.1
+collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
# sensor spec
output: [proprioception, rgb, highlight, depth, seg, ins_seg]
@@ -41,3 +54,12 @@ depth_high: 10.0
# sensor noise
depth_noise_rate: 0.0
scan_noise_rate: 0.0
+
+# visual objects
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
+
+# speed limit
+hand_threshold: 0.4
+hand_speed: 0.3
+body_speed: 0.3
diff --git a/igibson/examples/configs/behavior_onboard_sensing_fetch.yaml b/igibson/examples/configs/behavior_onboard_sensing_fetch.yaml
new file mode 100644
index 000000000..589efc9b7
--- /dev/null
+++ b/igibson/examples/configs/behavior_onboard_sensing_fetch.yaml
@@ -0,0 +1,80 @@
+# scene
+scene_id: Beechwood_1_int
+clutter: false
+build_graph: true
+load_texture: true
+pybullet_load_texture: true
+should_open_all_doors: true
+
+# domain randomization
+texture_randomization_freq: null
+object_randomization_freq: null
+
+# robot
+robot: FetchGripper
+use_ag: true
+ag_strict_mode: true
+default_arm_pose: diagonal30
+trunk_offset: 0.085
+controller:
+ mode: pose_delta_ori # options are {pose_delta_ori, position_fixed_ori, position_compliant_ori}
+ kv_vel: -2.0
+ input_max: [1, 1, 1, 1, 1, 1]
+ input_min: [-1, -1, -1, -1, -1, -1]
+ output_max: [0.2, 0.2, 0.2, 0.5, 0.5, 0.5]
+ output_min: [-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]
+ eef_always_in_frame: false # if true, will add hard workspace left-right constraints to prevent EEF from moving off-frame in horizontal directions
+ neutral_xy: [0.25, 0] # (x,y) relative pos values taken in robot base frame from which radius limits will be computed
+ radius_limit: 0.5 # x-y reaching limit
+ height_limits: [0.2, 1.5] # min, max height limits
+
+# task
+task: cleaning_cupboards
+task_id: 0
+online_sampling: false
+target_dist_min: 1.0
+target_dist_max: 10.0
+goal_format: polar
+task_obs_dim: 4
+
+# reward
+reward_type: l2
+success_reward: 10.0
+potential_reward_weight: 1.0
+collision_reward_weight: -0.1
+
+# discount factor
+discount_factor: 0.99
+
+# termination condition
+dist_tol: 0.36 # body width
+max_step: 500
+max_collisions_allowed: 500
+
+# misc config
+initial_pos_z_offset: 0.1
+collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
+
+# sensor spec
+output: [proprioception, rgb, highlight, depth, seg, ins_seg]
+# image
+fisheye: false
+image_width: 512
+image_height: 512
+vertical_fov: 120
+# depth
+depth_low: 0.0
+depth_high: 10.0
+
+# sensor noise
+depth_noise_rate: 0.0
+scan_noise_rate: 0.0
+
+# visual objects
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
+
+# speed limit
+hand_threshold: 0.4
+hand_speed: 0.3
+body_speed: 0.3
diff --git a/igibson/configs/fetch_motion_planning.yaml b/igibson/examples/configs/fetch_motion_planning.yaml
similarity index 71%
rename from igibson/configs/fetch_motion_planning.yaml
rename to igibson/examples/configs/fetch_motion_planning.yaml
index e2bbfb068..fe5098b47 100644
--- a/igibson/configs/fetch_motion_planning.yaml
+++ b/igibson/examples/configs/fetch_motion_planning.yaml
@@ -14,30 +14,10 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: Fetch
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- assisted_grasping_mode: null
- rigid_trunk: false
- default_trunk_offset: 0.365
- default_arm_pose: diagonal30
- controller_config:
- base:
- name: DifferentialDriveController
- arm:
- name: InverseKinematicsController
- kv: 2.0
- gripper:
- name: ParallelJawGripperController
- mode: binary
- camera:
- name: JointController
- use_delta_commands: False
+robot: Fetch
+wheel_velocity: 0.8
+torso_lift_velocity: 0.8
+arm_velocity: 0.8
# task
task: room_rearrangement
@@ -97,6 +77,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-visible_path: false
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/configs/behavior_segmentation_replay.yaml b/igibson/examples/configs/fetch_reaching.yaml
similarity index 77%
rename from igibson/configs/behavior_segmentation_replay.yaml
rename to igibson/examples/configs/fetch_reaching.yaml
index efe83ed54..259397209 100644
--- a/igibson/configs/behavior_segmentation_replay.yaml
+++ b/igibson/examples/configs/fetch_reaching.yaml
@@ -1,27 +1,26 @@
# scene
scene: igibson
-scene_id: REPLACE_ME
-clutter: false
+scene_id: Rs_int
build_graph: true
load_texture: true
pybullet_load_texture: true
+trav_map_type: no_obj
+trav_map_resolution: 0.1
+trav_map_erosion: 3
should_open_all_doors: true
-# Ignore task, use EmptyScene instead
-debug: False
-
# domain randomization
texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: BehaviorRobot
+robot: Fetch
+wheel_velocity: 0.8
+torso_lift_velocity: 0.8
+arm_velocity: 0.8
# task
-task: REPLACE_ME
-task_id: REPLACE_ME
-online_sampling: False
+task: reaching_random
target_dist_min: 1.0
target_dist_max: 10.0
goal_format: polar
@@ -38,7 +37,7 @@ discount_factor: 0.99
# termination condition
dist_tol: 0.36 # body width
-max_step: 100
+max_step: 500
max_collisions_allowed: 500
# misc config
@@ -46,13 +45,13 @@ initial_pos_z_offset: 0.1
collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
# sensor spec
-output: [proprioception, task_obs]
+output: [task_obs, rgb, depth, scan]
# image
# Primesense Carmine 1.09 short-range RGBD sensor
# http://xtionprolive.com/primesense-carmine-1.09
fisheye: false
-image_width: 512
-image_height: 512
+image_width: 128
+image_height: 128
vertical_fov: 90
# depth
depth_low: 0.35
@@ -73,10 +72,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-
-
-# speed limit
-hand_threshold: 0.4
-hand_speed: 0.3
-body_speed: 0.3
\ No newline at end of file
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/configs/fetch_rearrangement.yaml b/igibson/examples/configs/fetch_room_rearrangement.yaml
similarity index 71%
rename from igibson/configs/fetch_rearrangement.yaml
rename to igibson/examples/configs/fetch_room_rearrangement.yaml
index c0313fd05..b33408cd4 100644
--- a/igibson/configs/fetch_rearrangement.yaml
+++ b/igibson/examples/configs/fetch_room_rearrangement.yaml
@@ -14,30 +14,10 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: Fetch
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- assisted_grasping_mode: null
- rigid_trunk: false
- default_trunk_offset: 0.365
- default_arm_pose: diagonal30
- controller_config:
- base:
- name: DifferentialDriveController
- arm:
- name: InverseKinematicsController
- kv: 2.0
- gripper:
- name: ParallelJawGripperController
- mode: binary
- camera:
- name: JointController
- use_delta_commands: False
+robot: Fetch
+wheel_velocity: 0.8
+torso_lift_velocity: 0.8
+arm_velocity: 0.8
# task
task: room_rearrangement
@@ -97,6 +77,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-visible_path: false
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/configs/jr2_reaching.yaml b/igibson/examples/configs/jr_reaching.yaml
similarity index 74%
rename from igibson/configs/jr2_reaching.yaml
rename to igibson/examples/configs/jr_reaching.yaml
index 3a6beb73f..a9c1b1bf0 100644
--- a/igibson/configs/jr2_reaching.yaml
+++ b/igibson/examples/configs/jr_reaching.yaml
@@ -14,22 +14,10 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: JR2
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: false
- rendering_params: null
- assisted_grasping_mode: null
- controller_config:
- base:
- name: DifferentialDriveController
- arm:
- name: InverseKinematicsController
- gripper:
- name: NullGripperController
+robot: JR2_Kinova
+is_discrete: false
+wheel_velocity: 0.3
+arm_velocity: 1.0
# task
task: reaching_random
@@ -73,5 +61,5 @@ depth_high: 10.0
depth_noise_rate: 0.0
# visual objects
-visible_target: true
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/configs/locobot_nav.yaml b/igibson/examples/configs/locobot_point_nav.yaml
similarity index 76%
rename from igibson/configs/locobot_nav.yaml
rename to igibson/examples/configs/locobot_point_nav.yaml
index bf9d6eb32..5e0001b2a 100644
--- a/igibson/configs/locobot_nav.yaml
+++ b/igibson/examples/configs/locobot_point_nav.yaml
@@ -14,20 +14,10 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: Locobot
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: false
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
- command_output_limits:
- - [-0.5, -1.57]
- - [0.5, 1.57]
+robot: Locobot
+is_discrete: false
+linear_velocity: 0.5
+angular_velocity: 1.5707963267948966
# task
task: point_nav_random
@@ -71,6 +61,5 @@ depth_high: 10.0
depth_noise_rate: 0.0
# visual objects
-visible_target: true
-visible_path: false
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/examples/configs/njk_re-shelving_library_books_full_obs.yaml b/igibson/examples/configs/njk_re-shelving_library_books_full_obs.yaml
new file mode 100644
index 000000000..00e824a0c
--- /dev/null
+++ b/igibson/examples/configs/njk_re-shelving_library_books_full_obs.yaml
@@ -0,0 +1,66 @@
+# scene
+scene_id: Pomaria_1_int
+clutter: false
+build_graph: true
+load_texture: true
+pybullet_load_texture: true
+should_open_all_doors: true
+
+# domain randomization
+texture_randomization_freq: null
+object_randomization_freq: null
+
+# robot
+robot: BehaviorRobot
+
+# task
+# task: cleaning_cupboards
+task: re-shelving_library_books
+task_id: 0
+online_sampling: false
+target_dist_min: 1.0
+target_dist_max: 10.0
+goal_format: polar
+task_obs_dim: 4
+
+# reward
+reward_type: l2
+success_reward: 10.0
+potential_reward_weight: 1.0
+collision_reward_weight: -0.1
+
+# discount factor
+discount_factor: 0.99
+
+# termination condition
+dist_tol: 0.36 # body width
+max_step: 500
+max_collisions_allowed: 500
+
+# misc config
+initial_pos_z_offset: 0.1
+collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
+
+# sensor spec
+output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs]
+# image
+fisheye: false
+image_width: 1280
+image_height: 1280
+vertical_fov: 120
+# depth
+depth_low: 0.0
+depth_high: 10.0
+
+# sensor noise
+depth_noise_rate: 0.0
+scan_noise_rate: 0.0
+
+# visual objects
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
+
+# speed limit
+hand_threshold: 0.4
+hand_speed: 0.3
+body_speed: 0.3
diff --git a/igibson/examples/configs/njk_sorting_books_full_obs.yaml b/igibson/examples/configs/njk_sorting_books_full_obs.yaml
new file mode 100644
index 000000000..19089a3a4
--- /dev/null
+++ b/igibson/examples/configs/njk_sorting_books_full_obs.yaml
@@ -0,0 +1,66 @@
+# scene
+scene_id: Pomaria_1_int
+clutter: false
+build_graph: true
+load_texture: true
+pybullet_load_texture: true
+should_open_all_doors: true
+
+# domain randomization
+texture_randomization_freq: null
+object_randomization_freq: null
+
+# robot
+robot: BehaviorRobot
+
+# task
+# task: cleaning_cupboards
+task: sorting_books
+task_id: 0
+online_sampling: false
+target_dist_min: 1.0
+target_dist_max: 10.0
+goal_format: polar
+task_obs_dim: 4
+
+# reward
+reward_type: l2
+success_reward: 10.0
+potential_reward_weight: 1.0
+collision_reward_weight: -0.1
+
+# discount factor
+discount_factor: 0.99
+
+# termination condition
+dist_tol: 0.36 # body width
+max_step: 500
+max_collisions_allowed: 500
+
+# misc config
+initial_pos_z_offset: 0.1
+collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
+
+# sensor spec
+output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs]
+# image
+fisheye: false
+image_width: 1280
+image_height: 1280
+vertical_fov: 120
+# depth
+depth_low: 0.0
+depth_high: 10.0
+
+# sensor noise
+depth_noise_rate: 0.0
+scan_noise_rate: 0.0
+
+# visual objects
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
+
+# speed limit
+hand_threshold: 0.4
+hand_speed: 0.3
+body_speed: 0.3
diff --git a/igibson/configs/turtlebot_static_nav.yaml b/igibson/examples/configs/turtlebot_demo.yaml
similarity index 81%
rename from igibson/configs/turtlebot_static_nav.yaml
rename to igibson/examples/configs/turtlebot_demo.yaml
index 6fcaac35a..c8fa5bfb2 100644
--- a/igibson/configs/turtlebot_static_nav.yaml
+++ b/igibson/examples/configs/turtlebot_demo.yaml
@@ -14,17 +14,9 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: Turtlebot
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: false
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
+robot: Turtlebot
+is_discrete: false
+velocity: 1.0
# task
task: point_nav_random
@@ -57,8 +49,8 @@ output: [task_obs, rgb, depth, scan]
# ASUS Xtion PRO LIVE
# https://www.asus.com/us/3D-Sensor/Xtion_PRO_LIVE
fisheye: false
-image_width: 640
-image_height: 480
+image_width: 160
+image_height: 120
vertical_fov: 45
# depth
depth_low: 0.8
@@ -79,5 +71,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-visible_path: false
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/configs/turtlebot_dynamic_nav.yaml b/igibson/examples/configs/turtlebot_dynamic_nav.yaml
similarity index 83%
rename from igibson/configs/turtlebot_dynamic_nav.yaml
rename to igibson/examples/configs/turtlebot_dynamic_nav.yaml
index 7c01c0326..43c7322dd 100644
--- a/igibson/configs/turtlebot_dynamic_nav.yaml
+++ b/igibson/examples/configs/turtlebot_dynamic_nav.yaml
@@ -14,17 +14,9 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: Turtlebot
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
+robot: Turtlebot
+is_discrete: false
+velocity: 1.0
# task
task: dynamic_nav_random
@@ -79,6 +71,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-visible_path: false
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/configs/turtlebot_interactive_nav.yaml b/igibson/examples/configs/turtlebot_interactive_nav.yaml
similarity index 83%
rename from igibson/configs/turtlebot_interactive_nav.yaml
rename to igibson/examples/configs/turtlebot_interactive_nav.yaml
index 191e088be..f159820d3 100644
--- a/igibson/configs/turtlebot_interactive_nav.yaml
+++ b/igibson/examples/configs/turtlebot_interactive_nav.yaml
@@ -14,17 +14,9 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: Turtlebot
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
+robot: Turtlebot
+is_discrete: false
+velocity: 1.0
# task
task: interactive_nav_random
@@ -79,6 +71,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-visible_path: false
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/configs/turtlebot_nav.yaml b/igibson/examples/configs/turtlebot_point_nav.yaml
similarity index 81%
rename from igibson/configs/turtlebot_nav.yaml
rename to igibson/examples/configs/turtlebot_point_nav.yaml
index af1300738..3e154ec35 100644
--- a/igibson/configs/turtlebot_nav.yaml
+++ b/igibson/examples/configs/turtlebot_point_nav.yaml
@@ -14,17 +14,9 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: Turtlebot
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: false
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
+robot: Turtlebot
+is_discrete: false
+velocity: 1.0
# task
task: point_nav_random
@@ -57,8 +49,8 @@ output: [task_obs, rgb, depth, scan]
# ASUS Xtion PRO LIVE
# https://www.asus.com/us/3D-Sensor/Xtion_PRO_LIVE
fisheye: false
-image_width: 640
-image_height: 480
+image_width: 160
+image_height: 120
vertical_fov: 45
# depth
depth_low: 0.8
@@ -79,5 +71,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-visible_path: false
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/configs/turtlebot_stadium_nav.yaml b/igibson/examples/configs/turtlebot_point_nav_stadium.yaml
similarity index 83%
rename from igibson/configs/turtlebot_stadium_nav.yaml
rename to igibson/examples/configs/turtlebot_point_nav_stadium.yaml
index 0ca201411..ee002a49d 100644
--- a/igibson/configs/turtlebot_stadium_nav.yaml
+++ b/igibson/examples/configs/turtlebot_point_nav_stadium.yaml
@@ -13,17 +13,9 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: Turtlebot
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
+robot: Turtlebot
+is_discrete: false
+velocity: 1.0
# task
task: point_nav_random
@@ -78,6 +70,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-visible_path: false
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/examples/configs/wbm3_modifiable_full_obs.yaml b/igibson/examples/configs/wbm3_modifiable_full_obs.yaml
new file mode 100644
index 000000000..fb427ef67
--- /dev/null
+++ b/igibson/examples/configs/wbm3_modifiable_full_obs.yaml
@@ -0,0 +1,66 @@
+# scene
+scene_id: scene_placeholder
+clutter: false
+build_graph: true
+load_texture: true
+pybullet_load_texture: true
+should_open_all_doors: true
+
+# domain randomization
+texture_randomization_freq: null
+object_randomization_freq: null
+
+# robot
+robot: BehaviorRobot
+
+# task
+# task: cleaning_cupboards
+task: task_placeholder
+task_id: 0
+online_sampling: online_sampling_placeholder
+target_dist_min: 1.0
+target_dist_max: 10.0
+goal_format: polar
+task_obs_dim: 4
+
+# reward
+reward_type: l2
+success_reward: 10.0
+potential_reward_weight: 1.0
+collision_reward_weight: -0.1
+
+# discount factor
+discount_factor: 0.99
+
+# termination condition
+dist_tol: 0.36 # body width
+max_step: 500
+max_collisions_allowed: 500
+
+# misc config
+initial_pos_z_offset: 0.1
+collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
+
+# sensor spec
+output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs]
+# image
+fisheye: false
+image_width: 1280
+image_height: 1280
+vertical_fov: 120
+# depth
+depth_low: 0.0
+depth_high: 10.0
+
+# sensor noise
+depth_noise_rate: 0.0
+scan_noise_rate: 0.0
+
+# visual objects
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
+
+# speed limit
+hand_threshold: 0.4
+hand_speed: 0.3
+body_speed: 0.3
diff --git a/igibson/examples/configs/wbm3_modifiable_full_obs.yaml_tmp b/igibson/examples/configs/wbm3_modifiable_full_obs.yaml_tmp
new file mode 100644
index 000000000..ddddc5a1d
--- /dev/null
+++ b/igibson/examples/configs/wbm3_modifiable_full_obs.yaml_tmp
@@ -0,0 +1,66 @@
+# scene
+scene_id: Ihlen_1_int
+clutter: false
+build_graph: true
+load_texture: true
+pybullet_load_texture: true
+should_open_all_doors: true
+
+# domain randomization
+texture_randomization_freq: null
+object_randomization_freq: null
+
+# robot
+robot: BehaviorRobot
+
+# task
+# task: cleaning_cupboards
+task: throwing_away_leftovers
+task_id: 0
+online_sampling: False
+target_dist_min: 1.0
+target_dist_max: 10.0
+goal_format: polar
+task_obs_dim: 4
+
+# reward
+reward_type: l2
+success_reward: 10.0
+potential_reward_weight: 1.0
+collision_reward_weight: -0.1
+
+# discount factor
+discount_factor: 0.99
+
+# termination condition
+dist_tol: 0.36 # body width
+max_step: 500
+max_collisions_allowed: 500
+
+# misc config
+initial_pos_z_offset: 0.1
+collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links
+
+# sensor spec
+output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs]
+# image
+fisheye: false
+image_width: 1280
+image_height: 1280
+vertical_fov: 120
+# depth
+depth_low: 0.0
+depth_high: 10.0
+
+# sensor noise
+depth_noise_rate: 0.0
+scan_noise_rate: 0.0
+
+# visual objects
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
+
+# speed limit
+hand_threshold: 0.4
+hand_speed: 0.3
+body_speed: 0.3
diff --git a/igibson/examples/object_states/__init__.py b/igibson/examples/demo/__init__.py
similarity index 100%
rename from igibson/examples/object_states/__init__.py
rename to igibson/examples/demo/__init__.py
diff --git a/igibson/examples/demo/cleaning_demo.py b/igibson/examples/demo/cleaning_demo.py
new file mode 100644
index 000000000..f6a8e597e
--- /dev/null
+++ b/igibson/examples/demo/cleaning_demo.py
@@ -0,0 +1,43 @@
+from igibson import object_states
+from igibson.object_states.factory import prepare_object_states
+from igibson.objects.ycb_object import YCBObject
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+
+
+def main():
+ s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0)
+ scene = InteractiveIndoorScene("Rs_int", texture_randomization=False, object_randomization=False)
+ s.import_ig_scene(scene)
+
+ block = YCBObject(name="036_wood_block")
+ block.abilities = ["soakable", "cleaningTool"]
+ prepare_object_states(block, abilities={"soakable": {}, "cleaningTool": {}})
+ s.import_object(block)
+ block.set_position([1, 1, 1.8])
+
+ # Set everything that can go dirty.
+ stateful_objects = set(
+ scene.get_objects_with_state(object_states.Dusty)
+ + scene.get_objects_with_state(object_states.Stained)
+ + scene.get_objects_with_state(object_states.WaterSource)
+ )
+ for obj in stateful_objects:
+ if object_states.Dusty in obj.states:
+ obj.states[object_states.Dusty].set_value(True)
+
+ if object_states.Stained in obj.states:
+ obj.states[object_states.Stained].set_value(True)
+
+ if object_states.WaterSource in obj.states and object_states.ToggledOn in obj.states:
+ obj.states[object_states.ToggledOn].set_value(True)
+
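+    # Step the simulation indefinitely; the finally block guarantees the simulator disconnects on exit.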
+ try:
+ while True:
+ s.step()
+ finally:
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/cleaning_demo_loader.py b/igibson/examples/demo/cleaning_demo_loader.py
new file mode 100644
index 000000000..f82080708
--- /dev/null
+++ b/igibson/examples/demo/cleaning_demo_loader.py
@@ -0,0 +1,26 @@
+from igibson import object_states
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+
+
+def main():
+ s = Simulator(mode="gui", device_idx=0)
+ scene = InteractiveIndoorScene("Rs_int", urdf_file="cleaning_demo", merge_fixed_links=True)
+ s.import_ig_scene(scene)
+
+ water_sources = set(scene.get_objects_with_state(object_states.WaterSource)) & set(
+ scene.get_objects_with_state(object_states.ToggledOn)
+ )
+ for obj in water_sources:
+ obj.states[object_states.ToggledOn].set_value(True)
+
+ # Let the user view the frozen scene in the UI for purposes of comparison.
+ try:
+ while True:
+ pass
+ finally:
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/cleaning_demo_saver.py b/igibson/examples/demo/cleaning_demo_saver.py
new file mode 100644
index 000000000..4e90137aa
--- /dev/null
+++ b/igibson/examples/demo/cleaning_demo_saver.py
@@ -0,0 +1,48 @@
+from igibson import object_states
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+
+
+def main():
+ s = Simulator(mode="gui", device_idx=0)
+ scene = InteractiveIndoorScene(
+ "Rs_int", texture_randomization=False, object_randomization=False, merge_fixed_links=False
+ )
+ s.import_ig_scene(scene)
+
+ # Set everything that can go dirty.
+ stateful_objects = set(
+ scene.get_objects_with_state(object_states.Dusty)
+ + scene.get_objects_with_state(object_states.Stained)
+ + scene.get_objects_with_state(object_states.WaterSource)
+ + scene.get_objects_with_state(object_states.Open)
+ )
+ for obj in stateful_objects:
+ if object_states.Dusty in obj.states:
+ obj.states[object_states.Dusty].set_value(True)
+
+ if object_states.Stained in obj.states:
+ obj.states[object_states.Stained].set_value(True)
+
+ if object_states.Open in obj.states:
+ obj.states[object_states.Open].set_value(True)
+
+ if object_states.WaterSource in obj.states:
+ obj.states[object_states.ToggledOn].set_value(True)
+
+ # Take some steps so water drops appear
+ for i in range(100):
+ s.step()
+
+ scene.save_modified_urdf("cleaning_demo")
+
+ # Let the user view the frozen scene in the UI for purposes of comparison.
+ try:
+ while True:
+ pass
+ finally:
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/demo_static.py b/igibson/examples/demo/demo_static.py
new file mode 100644
index 000000000..a2f71bda1
--- /dev/null
+++ b/igibson/examples/demo/demo_static.py
@@ -0,0 +1,33 @@
+import os
+
+import igibson
+from igibson.robots.turtlebot_robot import Turtlebot
+from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
+from igibson.simulator import Simulator
+from igibson.utils.assets_utils import download_assets, download_demo_data
+from igibson.utils.utils import parse_config
+
+
+class DemoStatic(object):
+ def __init__(self):
+ download_assets()
+ download_demo_data()
+
+ def run_demo(self):
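+        # Load the static Rs scene, import a TurtleBot, and drive it with a fixed action for 1000 steps.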
+ config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_demo.yaml"))
+
+ s = Simulator(mode="gui", image_width=700, image_height=700)
+ scene = StaticIndoorScene("Rs", pybullet_load_texture=True)
+ s.import_scene(scene)
+ turtlebot = Turtlebot(config)
+ s.import_robot(turtlebot)
+
+ for i in range(1000):
+ turtlebot.apply_action([0.1, 0.5])
+ s.step()
+
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ DemoStatic().run_demo()
diff --git a/igibson/examples/demo/env_example.py b/igibson/examples/demo/env_example.py
new file mode 100644
index 000000000..4c665e32b
--- /dev/null
+++ b/igibson/examples/demo/env_example.py
@@ -0,0 +1,25 @@
+import logging
+import os
+
+import igibson
+from igibson.envs.igibson_env import iGibsonEnv
+from igibson.render.profiler import Profiler
+
+
+def main():
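+    # Build an iGibson environment from the turtlebot_demo config and run ten episodes of up to 100 random actions each, profiling every step.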
+ config_filename = os.path.join(igibson.example_config_path, "turtlebot_demo.yaml")
+ env = iGibsonEnv(config_file=config_filename, mode="gui")
+ for j in range(10):
+ env.reset()
+ for i in range(100):
+ with Profiler("Environment action step"):
+ action = env.action_space.sample()
+ state, reward, done, info = env.step(action)
+ if done:
+ logging.info("Episode finished after {} timesteps".format(i + 1))
+ break
+ env.close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/env_interactive_example.py b/igibson/examples/demo/env_interactive_example.py
new file mode 100644
index 000000000..e5e68f471
--- /dev/null
+++ b/igibson/examples/demo/env_interactive_example.py
@@ -0,0 +1,25 @@
+import logging
+import os
+
+import igibson
+from igibson.envs.igibson_env import iGibsonEnv
+from igibson.render.profiler import Profiler
+
+
+def main():
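+    # Same loop as env_example.py, but built from the turtlebot_point_nav config.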
+ config_filename = os.path.join(igibson.example_config_path, "turtlebot_point_nav.yaml")
+ env = iGibsonEnv(config_file=config_filename, mode="gui")
+ for j in range(10):
+ env.reset()
+ for i in range(100):
+ with Profiler("Environment action step"):
+ action = env.action_space.sample()
+ state, reward, done, info = env.step(action)
+ if done:
+ logging.info("Episode finished after {} timesteps".format(i + 1))
+ break
+ env.close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/robots/fetch_gripper_robot_example.py b/igibson/examples/demo/fetch_gripper_robot_example.py
similarity index 84%
rename from igibson/examples/robots/fetch_gripper_robot_example.py
rename to igibson/examples/demo/fetch_gripper_robot_example.py
index 38b6235e3..f1f49897f 100644
--- a/igibson/examples/robots/fetch_gripper_robot_example.py
+++ b/igibson/examples/demo/fetch_gripper_robot_example.py
@@ -5,21 +5,21 @@
from IPython import embed
import igibson
-from igibson.robots.fetch import Fetch
+from igibson.robots.fetch_gripper_robot import FetchGripper
from igibson.scenes.empty_scene import EmptyScene
from igibson.simulator import Simulator
from igibson.utils.utils import parse_config
if __name__ == "__main__":
- s = Simulator(mode="headless", use_pb_gui=True)
+ s = Simulator(mode="pbgui")
scene = EmptyScene()
s.import_scene(scene)
config = parse_config(os.path.join(igibson.example_config_path, "behavior_onboard_sensing_fetch.yaml"))
- robot = Fetch(s, config)
+ robot = FetchGripper(s, config)
s.import_robot(robot)
- robot.reset()
+ robot.robot_specific_reset()
action_dim = 11
for i in range(action_dim):
embed()
diff --git a/igibson/examples/demo/igsdf_example.py b/igibson/examples/demo/igsdf_example.py
new file mode 100644
index 000000000..3a04894c1
--- /dev/null
+++ b/igibson/examples/demo/igsdf_example.py
@@ -0,0 +1,26 @@
+import argparse
+
+from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
+from igibson.render.profiler import Profiler
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+
+
+def main():
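+    # Parse the scene name from the command line, load that interactive scene, and step the simulator while profiling each step.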
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--scene", type=str, help="Name of the scene in the iG Dataset")
+ args = parser.parse_args()
+ settings = MeshRendererSettings(enable_shadow=True, msaa=False)
+ s = Simulator(mode="gui", image_width=256, image_height=256, rendering_settings=settings)
+
+ scene = InteractiveIndoorScene(args.scene)
+ s.import_ig_scene(scene)
+
+ for i in range(10000):
+ with Profiler("Simulator step"):
+ s.step()
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/objects/obj_rotating_video_gen.py b/igibson/examples/demo/igsdf_obj_viz.py
similarity index 87%
rename from igibson/examples/objects/obj_rotating_video_gen.py
rename to igibson/examples/demo/igsdf_obj_viz.py
index b591c6bbe..41193f639 100644
--- a/igibson/examples/objects/obj_rotating_video_gen.py
+++ b/igibson/examples/demo/igsdf_obj_viz.py
@@ -1,16 +1,16 @@
-import argparse
-import logging
import os
import subprocess
+import sys
import numpy as np
from PIL import Image
import igibson
+
+# from igibson.utils.assets_utils import get_model_path
from igibson.objects.articulated_object import ArticulatedObject
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.render.profiler import Profiler
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
@@ -77,20 +77,9 @@ def load_obj_np(
def main():
- """
- Generates videos rotating a camera around an object
- """
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--model_path",
- default=os.path.join(igibson.ig_dataset_path, "objects", "rocking_chair", "rocking_chair_0002"),
- help="Model to load",
- )
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
global _mouse_ix, _mouse_iy, down, view_direction
- args = parser.parse_args()
- model_path = args.model_path
+ model_path = sys.argv[1]
print(model_path)
model_id = os.path.basename(model_path)
@@ -102,10 +91,6 @@ def main():
)
s = Simulator(mode="headless", image_width=1800, image_height=1200, vertical_fov=70, rendering_settings=settings)
- scene = InteractiveIndoorScene(
- "Rs_int", texture_randomization=False, load_object_categories=["window"], object_randomization=False
- )
- s.import_scene(scene)
s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])
diff --git a/igibson/examples/scenes/scene_tour_video_gen.py b/igibson/examples/demo/igsdf_tour.py
similarity index 88%
rename from igibson/examples/scenes/scene_tour_video_gen.py
rename to igibson/examples/demo/igsdf_tour.py
index 9edc4eb6c..71025526d 100644
--- a/igibson/examples/scenes/scene_tour_video_gen.py
+++ b/igibson/examples/demo/igsdf_tour.py
@@ -1,9 +1,7 @@
import argparse
-import logging
import os
import random
import subprocess
-from sys import platform
import numpy as np
from PIL import Image
@@ -17,13 +15,8 @@
def main():
- """
- Generates videos navigating in the iG scenes
- Loads an iG scene, predefined paths and produces a video. Alternate random textures and/or objects, on demand.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
parser = argparse.ArgumentParser()
- parser.add_argument("--scene", type=str, help="Name of the scene in the iG Dataset", default="Rs_int")
+ parser.add_argument("--scene", type=str, help="Name of the scene in the iG Dataset")
parser.add_argument("--save_dir", type=str, help="Directory to save the frames.", default="misc")
parser.add_argument("--seed", type=int, default=15, help="Random seed.")
parser.add_argument("--domain_rand", dest="domain_rand", action="store_true")
@@ -48,7 +41,6 @@ def main():
msaa=True,
skybox_size=36.0,
light_dimming_factor=0.8,
- texture_scale=0.5 if platform == "darwin" else 1.0, # Reduce scale if in Mac
)
s = Simulator(mode="headless", image_width=1080, image_height=720, vertical_fov=60, rendering_settings=settings)
@@ -58,9 +50,8 @@ def main():
args.scene, texture_randomization=args.domain_rand, object_randomization=args.object_rand
)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
- # Load trajectory path
traj_path = os.path.join(get_ig_scene_path(args.scene), "misc", "tour_cam_trajectory.txt")
save_dir = os.path.join(get_ig_scene_path(args.scene), args.save_dir)
os.makedirs(save_dir, exist_ok=True)
diff --git a/igibson/examples/robots/ik_example.py b/igibson/examples/demo/ik_example.py
similarity index 94%
rename from igibson/examples/robots/ik_example.py
rename to igibson/examples/demo/ik_example.py
index ac4d615dc..c9de39f7c 100644
--- a/igibson/examples/robots/ik_example.py
+++ b/igibson/examples/demo/ik_example.py
@@ -1,5 +1,3 @@
-import os
-
import numpy as np
import pybullet as p
@@ -13,7 +11,7 @@
set_joint_positions,
)
from igibson.render.profiler import Profiler
-from igibson.robots.fetch import Fetch
+from igibson.robots.fetch_robot import Fetch
from igibson.scenes.empty_scene import EmptyScene
from igibson.simulator import Simulator
from igibson.utils.utils import parse_config
@@ -21,13 +19,13 @@
def main():
config = parse_config(os.path.join(igibson.example_config_path, "fetch_reaching.yaml"))
- s = Simulator(mode="gui_interactive", use_pb_gui=True, physics_timestep=1 / 240.0)
+ s = Simulator(mode="gui", physics_timestep=1 / 240.0)
scene = EmptyScene()
s.import_scene(scene)
fetch = Fetch(config)
s.import_robot(fetch)
- robot_id = fetch.get_body_id()
+ robot_id = fetch.robot_ids[0]
arm_joints = joints_from_names(
robot_id,
@@ -92,7 +90,7 @@ def accurateCalculateInverseKinematics(robotid, endEffectorId, targetPos, thresh
threshold = 0.01
maxIter = 100
joint_pos = accurateCalculateInverseKinematics(
- robot_id, fetch.links["gripper_link"].body_part_index, [x, y, z], threshold, maxIter
+ robot_id, fetch.parts["gripper_link"].body_part_index, [x, y, z], threshold, maxIter
)[2:10]
s.step()
diff --git a/igibson/examples/observations/generate_lidar_velodyne.py b/igibson/examples/demo/lidar_velodyne_example.py
similarity index 53%
rename from igibson/examples/observations/generate_lidar_velodyne.py
rename to igibson/examples/demo/lidar_velodyne_example.py
index 837cf25ad..82a9a28f1 100644
--- a/igibson/examples/observations/generate_lidar_velodyne.py
+++ b/igibson/examples/demo/lidar_velodyne_example.py
@@ -1,43 +1,33 @@
-import logging
import os
import matplotlib.pyplot as plt
+import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import igibson
+from igibson.objects.ycb_object import YCBObject
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.robots.turtlebot import Turtlebot
+from igibson.render.profiler import Profiler
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.simulator import Simulator
from igibson.utils.utils import parse_config
def main():
- """
- Example of rendering and visualizing velodyne lidar signals
- Loads Rs (non interactive) and a robot and renders a velodyne signal from the robot's camera
- It plots the velodyne point cloud with matplotlib
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml"))
- settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.01)
+ config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_demo.yaml"))
+ settings = MeshRendererSettings()
s = Simulator(mode="headless", image_width=256, image_height=256, rendering_settings=settings)
- scene = StaticIndoorScene("Rs", build_graph=True)
+ scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=True)
s.import_scene(scene)
- robot_config = config["robot"]
- robot_config.pop("name")
- turtlebot = Turtlebot(**robot_config)
+ turtlebot = Turtlebot(config)
s.import_robot(turtlebot)
turtlebot.apply_action([0.1, -0.1])
s.step()
-
- # Get velodyne lidar
lidar = s.renderer.get_lidar_all()
- logging.info("Dimensions of the lidar observation: {}".format(lidar.shape))
-
- # Visualize velodyne lidar
+ print(lidar.shape)
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(lidar[:, 0], lidar[:, 1], lidar[:, 2], s=3)
diff --git a/igibson/examples/renderer/mesh_renderer_example.py b/igibson/examples/demo/mesh_renderer_example.py
similarity index 68%
rename from igibson/examples/renderer/mesh_renderer_example.py
rename to igibson/examples/demo/mesh_renderer_example.py
index cce5a47c7..64c296ad5 100644
--- a/igibson/examples/renderer/mesh_renderer_example.py
+++ b/igibson/examples/demo/mesh_renderer_example.py
@@ -1,4 +1,3 @@
-import logging
import os
import sys
@@ -11,39 +10,31 @@
def main():
- """
- Creates renderer and renders RGB images in Rs (no interactive). No physics.
- The camera view can be controlled.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
global _mouse_ix, _mouse_iy, down, view_direction
- # If a model is given, we load it, otherwise we load Rs mesh (non interactive)
if len(sys.argv) > 1:
model_path = sys.argv[1]
else:
model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj")
- # Create renderer object and load the scene model
renderer = MeshRenderer(width=512, height=512)
renderer.load_object(model_path)
- renderer.add_instance_group([0])
- # Print some information about the loaded model
- logging.info(renderer.visual_objects, renderer.instances)
- logging.info(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx)
+ renderer.add_instance(0)
+ print(renderer.visual_objects, renderer.instances)
+ print(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx)
- # Create a simple viewer with OpenCV and a keyboard navigation
px = 0
py = 0.2
+
camera_pose = np.array([px, py, 0.5])
view_direction = np.array([0, -1, -1])
renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
renderer.set_fov(90)
+
_mouse_ix, _mouse_iy = -1, -1
down = False
- # Define the function callback for OpenCV events on the window
def change_dir(event, x, y, flags, param):
global _mouse_ix, _mouse_iy, down, view_direction
if event == cv2.EVENT_LBUTTONDOWN:
@@ -61,14 +52,13 @@ def change_dir(event, x, y, flags, param):
elif event == cv2.EVENT_LBUTTONUP:
down = False
- cv2.namedWindow("Viewer")
- cv2.setMouseCallback("Viewer", change_dir)
+ cv2.namedWindow("test")
+ cv2.setMouseCallback("test", change_dir)
- # Move camera and render
while True:
with Profiler("Render"):
frame = renderer.render(modes=("rgb"))
- cv2.imshow("Viewer", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
+ cv2.imshow("test", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
q = cv2.waitKey(1)
if q == ord("w"):
px += 0.01
@@ -83,7 +73,14 @@ def change_dir(event, x, y, flags, param):
camera_pose = np.array([px, py, 0.5])
renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
- # Cleanup
+ # start = time.time()
+
+ # for i in range(100):
+ # frame = renderer.render(modes=('rgb', 'normal', '3d'))
+
+ # elapsed = time.time() - start
+ # print("num objects {} fps {}".format(len(renderer.instances), 100/elapsed))
+
renderer.release()
diff --git a/igibson/examples/demo/mesh_renderer_example_pano.py b/igibson/examples/demo/mesh_renderer_example_pano.py
new file mode 100644
index 000000000..4b1d4a6e8
--- /dev/null
+++ b/igibson/examples/demo/mesh_renderer_example_pano.py
@@ -0,0 +1,45 @@
+import os
+import sys
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
+from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
+from igibson.utils.assets_utils import get_scene_path
+
+
+def main():
+ global _mouse_ix, _mouse_iy, down, view_direction
+
+ if len(sys.argv) > 1:
+ model_path = sys.argv[1]
+ else:
+ model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj")
+
+ settings = MeshRendererSettings(enable_pbr=False)
+ renderer = MeshRenderer(width=512, height=512, rendering_settings=settings)
+ renderer.load_object(model_path)
+
+ renderer.add_instance(0)
+ print(renderer.visual_objects, renderer.instances)
+ print(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx)
+
+ px = 0
+ py = 0.2
+
+ camera_pose = np.array([px, py, 0.5])
+ view_direction = np.array([0, -1, -0.3])
+ renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
+ renderer.set_fov(90)
+
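+    # Render an equirectangular (panoramic) image from the current camera pose and display it with matplotlib.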
+ img = renderer.get_equi()
+ print(img.shape)
+ plt.imshow(img)
+ plt.show()
+
+ renderer.release()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/renderer/mesh_renderer_pbr_example.py b/igibson/examples/demo/mesh_renderer_example_pbr.py
similarity index 77%
rename from igibson/examples/renderer/mesh_renderer_pbr_example.py
rename to igibson/examples/demo/mesh_renderer_example_pbr.py
index 43db4e770..d119e6d7c 100644
--- a/igibson/examples/renderer/mesh_renderer_pbr_example.py
+++ b/igibson/examples/demo/mesh_renderer_example_pbr.py
@@ -1,11 +1,9 @@
-import logging
import os
import sys
import cv2
import numpy as np
-import igibson
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.render.profiler import Profiler
@@ -13,8 +11,7 @@
def load_obj_np(filename_obj, normalization=False, texture_size=4, texture_wrapping="REPEAT", use_bilinear=True):
- """
- Load Wavefront OBJ file into numpy array
+ """Load Wavefront .obj file into numpy array
This function only supports vertices (v x x x) and faces (f x x x).
"""
# load vertices
@@ -43,62 +40,57 @@ def load_obj_np(filename_obj, normalization=False, texture_size=4, texture_wrapp
v2 = int(vs[i + 2].split("/")[0])
faces.append((v0, v1, v2))
faces = np.vstack(faces).astype(np.int32) - 1
- assert normalization is False
+
+    assert normalization is False  # normalization is unsupported: the block implementing it has been commented out
+
return vertices, faces
def main():
- """
- Minimal example of use of the PBR renderer. Loads Rs_int (interactive) or one object.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
global _mouse_ix, _mouse_iy, down, view_direction
if len(sys.argv) > 1:
model_path = sys.argv[1]
else:
- model_path = os.path.join(igibson.ig_dataset_path, "scenes", "Rs_int", "shape", "visual")
+ model_path = os.path.join(get_scene_path("Rs_int"), "mesh_z_up.obj")
settings = MeshRendererSettings(msaa=True, enable_shadow=True)
renderer = MeshRenderer(width=512, height=512, vertical_fov=70, rendering_settings=settings)
renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])
i = 0
- v = []
- # If a model path is given and it is an OBJ file we load it
+ v = []
for fn in os.listdir(model_path):
if fn.endswith("obj"):
vertices, faces = load_obj_np(os.path.join(model_path, fn))
v.append(vertices)
v = np.vstack(v)
- logging.info("Number of vertices {}".format(v.shape))
+ print(v.shape)
xlen = np.max(v[:, 0]) - np.min(v[:, 0])
ylen = np.max(v[:, 1]) - np.min(v[:, 1])
scale = 2.0 / (max(xlen, ylen))
- # If a model path is given and it is an OBJ file we load it
for fn in os.listdir(model_path):
if fn.endswith("obj"):
renderer.load_object(os.path.join(model_path, fn), scale=[scale, scale, scale])
- renderer.add_instance_group([i])
+ renderer.add_instance(i)
i += 1
- # Logging some info
- logging.info(renderer.visual_objects, renderer.instances)
- logging.info(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx)
+ print(renderer.visual_objects, renderer.instances)
+ print(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx)
- # Create a simple viewer with OpenCV
px = 1
py = 1
pz = 1
+
camera_pose = np.array([px, py, pz])
view_direction = np.array([-1, -1, -1])
renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
+
_mouse_ix, _mouse_iy = -1, -1
down = False
- # Callback function for OpenCV viewer
def change_dir(event, x, y, flags, param):
global _mouse_ix, _mouse_iy, down, view_direction
if event == cv2.EVENT_LBUTTONDOWN:
@@ -116,14 +108,13 @@ def change_dir(event, x, y, flags, param):
elif event == cv2.EVENT_LBUTTONUP:
down = False
- cv2.namedWindow("Viewer")
- cv2.setMouseCallback("Viewer", change_dir)
+ cv2.namedWindow("test")
+ cv2.setMouseCallback("test", change_dir)
- # Move camera and render
while True:
with Profiler("Render"):
frame = renderer.render(modes=("rgb", "normal", "seg", "ins_seg"))
- cv2.imshow("Viewer", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
+ cv2.imshow("test", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
q = cv2.waitKey(1)
if q == ord("w"):
diff --git a/igibson/examples/renderer/mesh_renderer_gpu_example.py b/igibson/examples/demo/mesh_renderer_gpu_example.py
similarity index 87%
rename from igibson/examples/renderer/mesh_renderer_gpu_example.py
rename to igibson/examples/demo/mesh_renderer_gpu_example.py
index bd9af9369..201a9c93c 100644
--- a/igibson/examples/renderer/mesh_renderer_gpu_example.py
+++ b/igibson/examples/demo/mesh_renderer_gpu_example.py
@@ -1,5 +1,4 @@
import os
-import platform
import sys
import matplotlib.pyplot as plt
@@ -11,10 +10,6 @@
def main():
- if platform.system() != "Linux":
- print("Rendering to pytorch tensor is only available on Linux.")
- exit(1)
-
if len(sys.argv) > 1:
model_path = sys.argv[1]
else:
@@ -22,7 +17,7 @@ def main():
renderer = MeshRendererG2G(width=512, height=512, device_idx=0)
renderer.load_object(model_path)
- renderer.add_instance_group([0])
+ renderer.add_instance(0)
print(renderer.visual_objects, renderer.instances)
print(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx)
diff --git a/igibson/examples/renderer/mesh_renderer_simple_example.py b/igibson/examples/demo/mesh_renderer_simple_example.py
similarity index 60%
rename from igibson/examples/renderer/mesh_renderer_simple_example.py
rename to igibson/examples/demo/mesh_renderer_simple_example.py
index 20aff6f7d..e33d47bd5 100644
--- a/igibson/examples/renderer/mesh_renderer_simple_example.py
+++ b/igibson/examples/demo/mesh_renderer_simple_example.py
@@ -1,4 +1,3 @@
-import logging
import os
import sys
@@ -10,13 +9,6 @@
def main():
- """
- Minimal example of use of the renderer. Loads Rs (non interactive), renders one set of images (RGB, normals,
- 3D points (as depth)), shows them.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
-
- # If a model is given, we load it, otherwise we load Rs mesh (non interactive)
if len(sys.argv) > 1:
model_path = sys.argv[1]
else:
@@ -24,18 +16,12 @@ def main():
renderer = MeshRenderer(width=512, height=512)
renderer.load_object(model_path)
- renderer.add_instance_group([0])
+ renderer.add_instance(0)
camera_pose = np.array([0, 0, 1.2])
view_direction = np.array([1, 0, 0])
renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
renderer.set_fov(90)
frames = renderer.render(modes=("rgb", "normal", "3d"))
-
- # Render 3d points as depth map
- depth = np.linalg.norm(frames[2][:, :, :3], axis=2)
- depth /= depth.max()
- frames[2][:, :, :3] = depth[..., None]
-
frames = cv2.cvtColor(np.concatenate(frames, axis=1), cv2.COLOR_RGB2BGR)
cv2.imshow("image", frames)
cv2.waitKey(0)
diff --git a/igibson/examples/robots/motion_planning_example.py b/igibson/examples/demo/motion_planning_example.py
similarity index 83%
rename from igibson/examples/robots/motion_planning_example.py
rename to igibson/examples/demo/motion_planning_example.py
index b37feab3e..81a17bb2a 100644
--- a/igibson/examples/robots/motion_planning_example.py
+++ b/igibson/examples/demo/motion_planning_example.py
@@ -32,9 +32,9 @@ def run_example(args):
parser.add_argument(
"--mode",
"-m",
- choices=["headless", "headless_tensor", "gui_non_interactive", "gui_interactive"],
- default="gui_interactive",
- help="which mode for simulation (default: gui_interactive)",
+ choices=["headless", "gui", "iggui"],
+ default="iggui",
+ help="which mode for simulation (default: iggui)",
)
args = parser.parse_args()
diff --git a/igibson/examples/demo/mouse_interaction.py b/igibson/examples/demo/mouse_interaction.py
new file mode 100644
index 000000000..ef072e264
--- /dev/null
+++ b/igibson/examples/demo/mouse_interaction.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+import argparse
+import os
+import time
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pybullet as p
+
+import igibson
+from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+from igibson.utils.assets_utils import get_3dfront_scene_path, get_cubicasa_scene_path, get_ig_scene_path
+
+# Human interaction demo: load a scene in the interactive viewer and profile the simulation speed.
+
+
+def test_import_igsdf(scene_name, scene_source):
+ hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
+ hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
+
+ if scene_source == "IG":
+ scene_dir = get_ig_scene_path(scene_name)
+ elif scene_source == "CUBICASA":
+ scene_dir = get_cubicasa_scene_path(scene_name)
+ else:
+ scene_dir = get_3dfront_scene_path(scene_name)
+
+ light_modulation_map_filename = os.path.join(scene_dir, "layout", "floor_lighttype_0.png")
+ background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
+
+ scene = InteractiveIndoorScene(
+ scene_name, texture_randomization=False, object_randomization=False, scene_source=scene_source
+ )
+
+ settings = MeshRendererSettings(
+ env_texture_filename=hdr_texture,
+ env_texture_filename2=hdr_texture2,
+ env_texture_filename3=background_texture,
+ light_modulation_map_filename=light_modulation_map_filename,
+ enable_shadow=True,
+ msaa=True,
+ light_dimming_factor=1.0,
+ optimized=True,
+ )
+ s = Simulator(mode="iggui", image_width=960, image_height=720, device_idx=0, rendering_settings=settings)
+
+ s.import_ig_scene(scene)
+ fpss = []
+
+ np.random.seed(0)
+ _, (px, py, pz) = scene.get_random_point()
+ s.viewer.px = px
+ s.viewer.py = py
+ s.viewer.pz = 1.7
+ s.viewer.update()
+
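+    # Step for 3000 frames, recording per-frame FPS and starting a PyBullet profiling trace at frame 2500.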
+ for i in range(3000):
+ if i == 2500:
+ logId = p.startStateLogging(loggingType=p.STATE_LOGGING_PROFILE_TIMINGS, fileName="trace_beechwood")
+ start = time.time()
+ s.step()
+ end = time.time()
+ print("Elapsed time: ", end - start)
+ print("Frequency: ", 1 / (end - start))
+ fpss.append(1 / (end - start))
+ p.stopStateLogging(logId)
+ s.disconnect()
+ print("end")
+
+ plt.plot(fpss)
+ plt.show()
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Open a scene with iGibson interactive viewer.")
+ parser.add_argument("--scene", dest="scene_name", type=str, default="Rs_int", help="The name of the scene to load")
+ parser.add_argument(
+ "--source",
+ dest="scene_source",
+ type=str,
+ default="IG",
+ help="The name of the source dataset, among [IG,CUBICASA,THREEDFRONT]",
+ )
+ args = parser.parse_args()
+ test_import_igsdf(args.scene_name, args.scene_source)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/object_example.py b/igibson/examples/demo/object_example.py
new file mode 100644
index 000000000..f6dca1f68
--- /dev/null
+++ b/igibson/examples/demo/object_example.py
@@ -0,0 +1,43 @@
+import os
+import time
+
+import pybullet as p
+import pybullet_data
+
+import igibson
+from igibson.objects.articulated_object import ArticulatedObject
+from igibson.objects.ycb_object import YCBObject
+
+
+def main():
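+    # Connect to the PyBullet GUI, load a ground plane, two articulated cabinets, and a YCB cracker box, then step the simulation in real time.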
+ p.connect(p.GUI)
+ p.setGravity(0, 0, -9.8)
+ p.setTimeStep(1.0 / 240.0)
+
+ floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
+ p.loadMJCF(floor)
+
+ cabinet_0007 = os.path.join(igibson.assets_path, "models/cabinet2/cabinet_0007.urdf")
+ cabinet_0004 = os.path.join(igibson.assets_path, "models/cabinet/cabinet_0004.urdf")
+
+ obj1 = ArticulatedObject(filename=cabinet_0007)
+ obj1.load()
+ obj1.set_position([0, 0, 0.5])
+
+ obj2 = ArticulatedObject(filename=cabinet_0004)
+ obj2.load()
+ obj2.set_position([0, 0, 2])
+
+ obj3 = YCBObject("003_cracker_box")
+ obj3.load()
+ obj3.set_position_orientation([0, 0, 1.2], [0, 0, 0, 1])
+
+ for _ in range(24000): # at least 100 seconds
+ p.stepSimulation()
+ time.sleep(1.0 / 240.0)
+
+ p.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/particle_demo.py b/igibson/examples/demo/particle_demo.py
new file mode 100644
index 000000000..583ca988d
--- /dev/null
+++ b/igibson/examples/demo/particle_demo.py
@@ -0,0 +1,78 @@
+import os
+
+import numpy as np
+
+from igibson import object_states
+from igibson.objects.articulated_object import URDFObject
+from igibson.scenes.empty_scene import EmptyScene
+from igibson.simulator import Simulator
+from igibson.utils.assets_utils import get_ig_model_path
+
+
+def main():
+ s = Simulator(mode="iggui", image_width=1280, image_height=720)
+
+ scene = EmptyScene()
+ s.import_scene(scene)
+
+ model_path = os.path.join(get_ig_model_path("sink", "sink_1"), "sink_1.urdf")
+
+ sink = URDFObject(
+ filename=model_path,
+ category="sink",
+ name="sink_1",
+ scale=np.array([0.8, 0.8, 0.8]),
+ abilities={"toggleable": {}, "waterSource": {}},
+ )
+
+ s.import_object(sink)
+ sink.set_position([1, 1, 0.8])
+ sink.states[object_states.ToggledOn].set_value(True)
+
+ model_path = get_ig_model_path("scrub_brush", "scrub_brush_000")
+ model_filename = os.path.join(model_path, "scrub_brush_000.urdf")
+ max_bbox = [0.1, 0.1, 0.1]
+ avg = {"size": max_bbox, "density": 67.0}
+ brush = URDFObject(
+ filename=model_filename,
+ category="scrub_brush",
+ name="scrub_brush",
+ avg_obj_dims=avg,
+ fit_avg_dim_volume=True,
+ model_path=model_path,
+ )
+ s.import_object(brush)
+ brush.set_position([0, -2, 0.4])
+
+ model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf")
+ desk = URDFObject(
+ filename=model_path,
+ category="table",
+ name="19898",
+ scale=np.array([0.8, 0.8, 0.8]),
+ abilities={"dustyable": {}, "stainable": {}},
+ )
+
+ print(desk.states.keys())
+ s.import_object(desk)
+ desk.set_position([1, -2, 0.4])
+ assert desk.states[object_states.Dusty].set_value(True)
+ assert desk.states[object_states.Stained].set_value(True)
+
+ model_path = os.path.join(get_ig_model_path("bowl", "68_0"), "68_0.urdf")
+ bowl = URDFObject(filename=model_path, category="bowl", name="bowl", abilities={"dustyable": {}, "stainable": {}})
+ s.import_object(bowl)
+ assert bowl.states[object_states.OnTop].set_value(desk, True, use_ray_casting_method=True)
+ assert bowl.states[object_states.Dusty].set_value(True)
+ assert bowl.states[object_states.Stained].set_value(True)
+ # Main simulation loop
+ try:
+ while True:
+ s.step()
+ print("Dirty: ", desk.states[object_states.Dusty].get_value())
+ finally:
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/particle_reset_demo.py b/igibson/examples/demo/particle_reset_demo.py
new file mode 100644
index 000000000..bb3db0714
--- /dev/null
+++ b/igibson/examples/demo/particle_reset_demo.py
@@ -0,0 +1,86 @@
+import os
+
+import numpy as np
+import pybullet as p
+
+from igibson import object_states
+from igibson.objects.articulated_object import URDFObject
+from igibson.scenes.empty_scene import EmptyScene
+from igibson.simulator import Simulator
+from igibson.utils.assets_utils import get_ig_model_path
+
+
+def main():
+ s = Simulator(mode="gui", image_width=1280, image_height=720)
+
+ scene = EmptyScene()
+ s.import_scene(scene)
+
+ model_path = os.path.join(get_ig_model_path("sink", "sink_1"), "sink_1.urdf")
+
+ sink = URDFObject(
+ filename=model_path,
+ category="sink",
+ name="sink_1",
+ scale=np.array([0.8, 0.8, 0.8]),
+ abilities={"toggleable": {}, "waterSource": {}},
+ )
+
+ s.import_object(sink)
+ sink.set_position([1, 1, 0.8])
+ sink.states[object_states.ToggledOn].set_value(True)
+
+ model_path = get_ig_model_path("vacuum", "vacuum_000")
+ model_filename = os.path.join(model_path, "vacuum_000.urdf")
+ max_bbox = [0.1, 0.1, 0.1]
+ avg = {"size": max_bbox, "density": 67.0}
+ brush = URDFObject(
+ filename=model_filename, category="vacuum", name="vacuum", avg_obj_dims=avg, fit_avg_dim_volume=True
+ )
+ s.import_object(brush)
+ brush.set_position([0, -2, 0.4])
+
+ model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf")
+ desk = URDFObject(
+ filename=model_path,
+ category="table",
+ name="19898",
+ scale=np.array([0.8, 0.8, 0.8]),
+ abilities={"dustyable": {}},
+ )
+
+ print(desk.states.keys())
+ s.import_object(desk)
+ desk.set_position([1, -2, 0.4])
+ desk.states[object_states.Dusty].set_value(True)
+
+ # Dump the initial state.
+ state_dump = p.saveState()
+ dump = desk.dump_state()
+ print(dump)
+
+ # Main simulation loop.
+ try:
+ # Keep stepping until we reach a clean state.
+ while desk.states[object_states.Dusty].get_value():
+ s.step()
+ if not desk.states[object_states.Dusty].get_value():
+ print("Cleaned.")
+ print("Dirty: ", desk.states[object_states.Dusty].get_value())
+
+ # Reset to the initial state
+ p.restoreState(state_dump)
+ p.removeState(state_dump)
+ desk.load_state(dump)
+ desk.force_wakeup()
+
+ # Continue simulation
+ while True:
+ s.step()
+ print("Dirty: ", desk.states[object_states.Dusty].get_value())
+ finally:
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/physics_engine_example.py b/igibson/examples/demo/physics_engine_example.py
new file mode 100644
index 000000000..8c77710b4
--- /dev/null
+++ b/igibson/examples/demo/physics_engine_example.py
@@ -0,0 +1,50 @@
+import os
+import sys
+
+import pybullet as p
+
+import igibson
+from igibson.utils.assets_utils import get_scene_path
+
+
+def main():
+ if len(sys.argv) > 1:
+ model_path = sys.argv[1]
+ else:
+ model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj")
+
+ p.connect(p.GUI)
+ p.setGravity(0, 0, -9.8)
+ p.setTimeStep(1.0 / 240.0)
+
+ # Load scenes
+ collision_id = p.createCollisionShape(
+ p.GEOM_MESH, fileName=model_path, meshScale=1.0, flags=p.GEOM_FORCE_CONCAVE_TRIMESH
+ )
+ visual_id = p.createVisualShape(p.GEOM_MESH, fileName=model_path, meshScale=1.0)
+
+ mesh_id = p.createMultiBody(baseCollisionShapeIndex=collision_id, baseVisualShapeIndex=visual_id)
+
+ # Load robots
+ turtlebot_urdf = os.path.join(igibson.assets_path, "models/turtlebot/turtlebot.urdf")
+ robot_id = p.loadURDF(turtlebot_urdf, flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL)
+
+ # Load objects
+ obj_visual_filename = os.path.join(igibson.assets_path, "models/ycb/002_master_chef_can/textured_simple.obj")
+ obj_collision_filename = os.path.join(
+ igibson.assets_path, "models/ycb/002_master_chef_can/textured_simple_vhacd.obj"
+ )
+ collision_id = p.createCollisionShape(p.GEOM_MESH, fileName=obj_collision_filename, meshScale=1.0)
+ visual_id = p.createVisualShape(p.GEOM_MESH, fileName=obj_visual_filename, meshScale=1.0)
+ object_id = p.createMultiBody(
+ baseCollisionShapeIndex=collision_id, baseVisualShapeIndex=visual_id, basePosition=[1.0, 0.0, 1.0], baseMass=0.1
+ )
+
+ for _ in range(10000):
+ p.stepSimulation()
+
+ p.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/objects/sample_objects_logic.py b/igibson/examples/demo/ray_cast_sampling.py
similarity index 70%
rename from igibson/examples/objects/sample_objects_logic.py
rename to igibson/examples/demo/ray_cast_sampling.py
index 6421aead4..ad4bebeae 100644
--- a/igibson/examples/objects/sample_objects_logic.py
+++ b/igibson/examples/demo/ray_cast_sampling.py
@@ -1,4 +1,3 @@
-import logging
import os
import numpy as np
@@ -6,7 +5,6 @@
import igibson
from igibson import object_states
from igibson.objects.articulated_object import URDFObject
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.scenes.empty_scene import EmptyScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import download_assets
@@ -15,22 +13,10 @@
def main():
- """
- Demo to use the raycasting-based sampler to load objects onTop and/or inside another
- Loads a cabinet, a microwave open on top of it, and two plates with apples on top, one inside and one on top of the cabinet
- Then loads a shelf and YCB cracker boxes inside of it
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- settings = MeshRendererSettings(enable_shadow=True, msaa=False, optimized=False)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
+ s = Simulator(mode="gui")
+ scene = EmptyScene()
s.import_scene(scene)
- # Set a better viewing direction
- s.viewer.initial_pos = [-0.2, -1.3, 2.7]
- s.viewer.initial_view_direction = [0.3, 0.8, -0.5]
- s.viewer.reset_viewer()
-
try:
sample_microwave_plates_apples(s)
sample_boxes_on_shelf(s)
@@ -53,27 +39,22 @@ def sample_microwave_plates_apples(simulator):
apple_dir = os.path.join(igibson.ig_dataset_path, "objects/apple/00_0/")
apple_filename = os.path.join(apple_dir, "00_0.urdf")
- # Load cabinet, set position manually, and step 100 times
- logging.info("Loading cabinet")
cabinet = URDFObject(filename=cabinet_filename, category="cabinet", scale=np.array([2.0, 2.0, 2.0]))
simulator.import_object(cabinet)
cabinet.set_position([0, 0, 0.5])
for _ in range(100):
simulator.step()
- # Load microwave, set position on top of the cabinet, open it, and step 100 times
- logging.info("Loading microwave Open and OnTop of the cabinet")
microwave = URDFObject(
filename=microwave_filename, category="microwave", model_path=microwave_dir, scale=np.array([0.5, 0.5, 0.5])
)
simulator.import_object(microwave)
assert microwave.states[object_states.OnTop].set_value(cabinet, True, use_ray_casting_method=True)
- assert microwave.states[object_states.Open].set_value(True)
- logging.info("Microwave loaded and placed")
+ microwave.states[object_states.Open].set_value(True)
for _ in range(100):
simulator.step()
+ print("Microwave placed")
- logging.info("Loading plates")
for i in range(3):
plate = URDFObject(
filename=plate_filename, category="plate", model_path=plate_dir, bounding_box=[0.25, 0.25, 0.05]
@@ -82,22 +63,20 @@ def sample_microwave_plates_apples(simulator):
# Put the 1st plate in the microwave
if i == 0:
- logging.info("Loading plate Inside the microwave")
assert plate.states[object_states.Inside].set_value(microwave, True, use_ray_casting_method=True)
else:
- logging.info("Loading plate OnTop the microwave")
assert plate.states[object_states.OnTop].set_value(microwave, True, use_ray_casting_method=True)
- logging.info("Plate %d loaded and placed." % i)
+ print("Plate %d placed." % i)
+
for _ in range(100):
simulator.step()
- logging.info("Loading three apples OnTop of the plate")
for j in range(3):
apple = URDFObject(filename=apple_filename, category="apple", model_path=apple_dir)
simulator.import_object(apple)
assert apple.states[object_states.OnTop].set_value(plate, True, use_ray_casting_method=True)
- logging.info("Apple %d loaded and placed." % j)
+
for _ in range(100):
simulator.step()
diff --git a/igibson/examples/demo/robot_example.py b/igibson/examples/demo/robot_example.py
new file mode 100644
index 000000000..460ee2676
--- /dev/null
+++ b/igibson/examples/demo/robot_example.py
@@ -0,0 +1,64 @@
+import os
+import time
+
+import numpy as np
+import pybullet as p
+import pybullet_data
+
+import igibson
+from igibson.robots.fetch_robot import Fetch
+from igibson.robots.jr2_kinova_robot import JR2_Kinova
+from igibson.robots.locobot_robot import Locobot
+from igibson.robots.turtlebot_robot import Turtlebot
+from igibson.utils.utils import parse_config
+
+
+def main():
+ p.connect(p.GUI)
+ p.setGravity(0, 0, -9.8)
+ p.setTimeStep(1.0 / 240.0)
+
+ floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
+ p.loadMJCF(floor)
+
+ robots = []
+ config = parse_config(os.path.join(igibson.example_config_path, "fetch_reaching.yaml"))
+ fetch = Fetch(config)
+ robots.append(fetch)
+
+ config = parse_config(os.path.join(igibson.example_config_path, "jr_reaching.yaml"))
+ jr = JR2_Kinova(config)
+ robots.append(jr)
+
+ config = parse_config(os.path.join(igibson.example_config_path, "locobot_point_nav.yaml"))
+ locobot = Locobot(config)
+ robots.append(locobot)
+
+ config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_point_nav.yaml"))
+ turtlebot = Turtlebot(config)
+ robots.append(turtlebot)
+
+ positions = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]
+
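+    # Load each robot at its own position and reset it before the simulation starts.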
+ for robot, position in zip(robots, positions):
+ robot.load()
+ robot.set_position(position)
+ robot.robot_specific_reset()
+ robot.keep_still()
+
+ for _ in range(2400): # keep still for 10 seconds
+ p.stepSimulation()
+ time.sleep(1.0 / 240.0)
+
+ for _ in range(2400): # move with small random actions for 10 seconds
+ for robot, position in zip(robots, positions):
+ action = np.random.uniform(-1, 1, robot.action_dim)
+ robot.apply_action(action)
+ p.stepSimulation()
+ time.sleep(1.0 / 240.0)
+
+ p.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/scene_example.py b/igibson/examples/demo/scene_example.py
new file mode 100644
index 000000000..9a642eb3f
--- /dev/null
+++ b/igibson/examples/demo/scene_example.py
@@ -0,0 +1,36 @@
+import time
+
+import numpy as np
+import pybullet as p
+
+from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
+
+
+def main():
+ p.connect(p.GUI)
+ p.setGravity(0, 0, -9.8)
+ p.setTimeStep(1.0 / 240.0)
+
+ scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=True)
+ scene.load()
+
+ np.random.seed(0)
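+    # Sample pairs of random points on a random floor and report the geodesic shortest path between them.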
+ for _ in range(10):
+ random_floor = scene.get_random_floor()
+ p1 = scene.get_random_point(random_floor)[1]
+ p2 = scene.get_random_point(random_floor)[1]
+ shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True)
+ print("random point 1:", p1)
+ print("random point 2:", p2)
+ print("geodesic distance between p1 and p2", geodesic_distance)
+ print("shortest path from p1 to p2:", shortest_path)
+
+ for _ in range(24000): # at least 100 seconds
+ p.stepSimulation()
+ time.sleep(1.0 / 240.0)
+
+ p.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/scene_interactive_example.py b/igibson/examples/demo/scene_interactive_example.py
new file mode 100644
index 000000000..d21032db4
--- /dev/null
+++ b/igibson/examples/demo/scene_interactive_example.py
@@ -0,0 +1,23 @@
+import numpy as np
+
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+
+
+def main():
+ s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0)
+ scene = InteractiveIndoorScene("Rs_int", texture_randomization=False, object_randomization=False)
+ s.import_ig_scene(scene)
+
+ np.random.seed(0)
+ for _ in range(10):
+ pt = scene.get_random_point_by_room_type("living_room")[1]
+ print("random point in living_room", pt)
+
+ for _ in range(1000):
+ s.step()
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/scene_interactive_example_highlight.py b/igibson/examples/demo/scene_interactive_example_highlight.py
new file mode 100644
index 000000000..77b630206
--- /dev/null
+++ b/igibson/examples/demo/scene_interactive_example_highlight.py
@@ -0,0 +1,32 @@
+import numpy as np
+
+from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+
+
+def main():
+ settings = MeshRendererSettings(optimized=True)
+ s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0, rendering_settings=settings)
+ scene = InteractiveIndoorScene("Rs_int", texture_randomization=False, object_randomization=False)
+ s.import_ig_scene(scene)
+ np.random.seed(0)
+ for _ in range(10):
+ pt = scene.get_random_point_by_room_type("living_room")[1]
+ print("random point in living_room", pt)
+
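+    # Toggle highlighting of all window objects on and off every 100 simulation steps.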
+ for i in range(1000):
+ s.step()
+ if i % 100 == 0:
+ for obj in scene.objects_by_category["window"]:
+ obj.highlight()
+
+ if i % 100 == 50:
+ for obj in scene.objects_by_category["window"]:
+ obj.unhighlight()
+
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/scene_interactive_object_rand_example.py b/igibson/examples/demo/scene_interactive_object_rand_example.py
new file mode 100644
index 000000000..e4e0188b0
--- /dev/null
+++ b/igibson/examples/demo/scene_interactive_object_rand_example.py
@@ -0,0 +1,21 @@
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+
+
+def main():
+ s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0)
+
+ for random_seed in range(10):
+ scene = InteractiveIndoorScene(
+ "Rs_int", texture_randomization=False, object_randomization=True, object_randomization_idx=random_seed
+ )
+ s.import_ig_scene(scene)
+ for i in range(1000):
+ s.step()
+ s.reload()
+
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/scene_interactive_partial_loading_example.py b/igibson/examples/demo/scene_interactive_partial_loading_example.py
new file mode 100644
index 000000000..de296b713
--- /dev/null
+++ b/igibson/examples/demo/scene_interactive_partial_loading_example.py
@@ -0,0 +1,22 @@
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+
+
+def main():
+ s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0)
+ scene = InteractiveIndoorScene(
+ "Rs_int",
+ texture_randomization=False,
+ object_randomization=False,
+ load_object_categories=["chair"],
+ load_room_types=["living_room"],
+ )
+ s.import_ig_scene(scene)
+
+ for _ in range(1000):
+ s.step()
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/scene_interactive_texture_rand_example.py b/igibson/examples/demo/scene_interactive_texture_rand_example.py
new file mode 100644
index 000000000..84f51a206
--- /dev/null
+++ b/igibson/examples/demo/scene_interactive_texture_rand_example.py
@@ -0,0 +1,18 @@
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from igibson.simulator import Simulator
+
+
+def main():
+ s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0)
+ scene = InteractiveIndoorScene("Rs_int", texture_randomization=True, object_randomization=False)
+ s.import_ig_scene(scene)
+
+ for i in range(10000):
+ if i % 1000 == 0:
+ scene.randomize_texture()
+ s.step()
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/scene_stadium_example.py b/igibson/examples/demo/scene_stadium_example.py
new file mode 100644
index 000000000..3b075e659
--- /dev/null
+++ b/igibson/examples/demo/scene_stadium_example.py
@@ -0,0 +1,24 @@
+import time
+
+import pybullet as p
+
+from igibson.scenes.stadium_scene import StadiumScene
+
+
+def main():
+ p.connect(p.GUI)
+ p.setGravity(0, 0, -9.8)
+ p.setTimeStep(1.0 / 240.0)
+
+ scene = StadiumScene()
+ scene.load()
+
+ for _ in range(24000): # at least 100 seconds
+ p.stepSimulation()
+ time.sleep(1.0 / 240.0)
+
+ p.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/simulator_example.py b/igibson/examples/demo/simulator_example.py
new file mode 100644
index 000000000..95e9dc94e
--- /dev/null
+++ b/igibson/examples/demo/simulator_example.py
@@ -0,0 +1,41 @@
+import os
+
+import numpy as np
+
+import igibson
+from igibson.objects.ycb_object import YCBObject
+from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
+from igibson.render.profiler import Profiler
+from igibson.robots.turtlebot_robot import Turtlebot
+from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
+from igibson.simulator import Simulator
+from igibson.utils.utils import parse_config
+
+
+def main():
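+    # Load the static Rs scene with a TurtleBot, scatter ten cracker boxes at random poses, then drive forward while rendering the robot camera.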
+ config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_demo.yaml"))
+ settings = MeshRendererSettings(enable_shadow=False, msaa=False)
+ s = Simulator(mode="gui", image_width=256, image_height=256, rendering_settings=settings)
+
+ scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=True)
+ s.import_scene(scene)
+ turtlebot = Turtlebot(config)
+ s.import_robot(turtlebot)
+
+ for _ in range(10):
+ obj = YCBObject("003_cracker_box")
+ s.import_object(obj)
+ obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
+
+ print(s.renderer.instances)
+
+ for i in range(10000):
+ with Profiler("Simulator step"):
+ turtlebot.apply_action([0.1, 0.1])
+ s.step()
+ rgb = s.renderer.render_robot_cameras(modes=("rgb"))
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/sliceable_reset_demo.py b/igibson/examples/demo/sliceable_reset_demo.py
new file mode 100644
index 000000000..8b8a3215c
--- /dev/null
+++ b/igibson/examples/demo/sliceable_reset_demo.py
@@ -0,0 +1,84 @@
+import os
+
+import numpy as np
+import pybullet as p
+
+from igibson import object_states
+from igibson.objects.articulated_object import URDFObject
+from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer
+from igibson.scenes.empty_scene import EmptyScene
+from igibson.simulator import Simulator
+from igibson.utils.assets_utils import get_ig_model_path
+
+
+def main():
+ s = Simulator(mode="gui", image_width=1280, image_height=720)
+ scene = EmptyScene()
+ s.import_scene(scene)
+
+ model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf")
+ desk = URDFObject(
+ filename=model_path, category="table", name="19898", scale=np.array([0.8, 0.8, 0.8]), abilities={}
+ )
+ s.import_object(desk)
+ desk.set_position([0, 0, 0.4])
+
+ model_path = os.path.join(get_ig_model_path("apple", "00_0"), "00_0.urdf")
+ simulator_obj = URDFObject(
+ model_path, name="00_0", category="apple", scale=np.array([1.0, 1.0, 1.0]), initial_pos=[100, 100, -100]
+ )
+
+ whole_object = simulator_obj
+ object_parts = []
+
+ for i, part in enumerate(simulator_obj.metadata["object_parts"]):
+ category = part["category"]
+ model = part["model"]
+ # Scale the offset accordingly
+ part_pos = part["pos"] * whole_object.scale
+ part_orn = part["orn"]
+ model_path = get_ig_model_path(category, model)
+ filename = os.path.join(model_path, model + ".urdf")
+ obj_name = whole_object.name + "_part_{}".format(i)
+ simulator_obj_part = URDFObject(
+ filename,
+ name=obj_name,
+ category=category,
+ model_path=model_path,
+ scale=whole_object.scale,
+ initial_pos=[100 + i, 100, -100],
+ )
+ object_parts.append((simulator_obj_part, (part_pos, part_orn)))
+
+ grouped_obj_parts = ObjectGrouper(object_parts)
+ apple = ObjectMultiplexer(whole_object.name + "_multiplexer", [whole_object, grouped_obj_parts], 0)
+
+ s.import_object(apple)
+ apple.set_position([0, 0, 0.72])
+
+ # Let the apple fall
+ for _ in range(100):
+ s.step()
+
+ # Dump the initial state.
+ state_dump = p.saveState()
+ dump = apple.dump_state()
+ print(dump)
+
+ # Slice the apple and set the object parts away
+ apple.states[object_states.Sliced].set_value(True)
+ assert isinstance(apple.current_selection(), ObjectGrouper)
+ for obj_part in apple.objects:
+ obj_part.set_position([0, 0, 1])
+
+ p.restoreState(state_dump)
+ p.removeState(state_dump)
+ # The apple should become whole again
+ apple.load_state(dump)
+
+ while True:
+ s.step()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/demo/stable_baselines3_behavior_example.py b/igibson/examples/demo/stable_baselines3_behavior_example.py
new file mode 100644
index 000000000..af3f6c4b7
--- /dev/null
+++ b/igibson/examples/demo/stable_baselines3_behavior_example.py
@@ -0,0 +1,153 @@
+import os
+from typing import Callable
+
+import igibson
+from igibson.envs.behavior_env import BehaviorEnv
+
+try:
+ import gym
+ import torch as th
+ import torch.nn as nn
+ from stable_baselines3 import PPO
+ from stable_baselines3.common.evaluation import evaluate_policy
+ from stable_baselines3.common.preprocessing import maybe_transpose
+ from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
+ from stable_baselines3.common.utils import set_random_seed
+ from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor
+
+except ModuleNotFoundError:
+    print("stable-baselines3 is not installed. Install it with: pip install stable-baselines3")
+ exit(1)
+
+
+"""
+Example training code using stable-baselines3 PPO for one BEHAVIOR activity.
+Note that due to the sparsity of the reward, this training code will not converge to task success.
+This only serves as a starting point that users can further build upon.
+"""
+
+
+class CustomCombinedExtractor(BaseFeaturesExtractor):
+ def __init__(self, observation_space: gym.spaces.Dict):
+ # We do not know features-dim here before going over all the items,
+ # so put something dummy for now. PyTorch requires calling
+ # nn.Module.__init__ before adding modules
+ super(CustomCombinedExtractor, self).__init__(observation_space, features_dim=1)
+
+ extractors = {}
+
+ total_concat_size = 0
+ feature_size = 128
+ for key, subspace in observation_space.spaces.items():
+ if key in ["proprioception", "task_obs"]:
+ extractors[key] = nn.Sequential(nn.Linear(subspace.shape[0], feature_size), nn.ReLU())
+ elif key in ["rgb", "highlight", "depth", "seg", "ins_seg"]:
+ n_input_channels = subspace.shape[2] # channels last
+ cnn = nn.Sequential(
+ nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0),
+ nn.ReLU(),
+ nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0),
+ nn.ReLU(),
+ nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),
+ nn.ReLU(),
+ nn.Flatten(),
+ )
+ test_tensor = th.zeros([subspace.shape[2], subspace.shape[0], subspace.shape[1]])
+ with th.no_grad():
+ n_flatten = cnn(test_tensor[None]).shape[1]
+ fc = nn.Sequential(nn.Linear(n_flatten, feature_size), nn.ReLU())
+ extractors[key] = nn.Sequential(cnn, fc)
+ elif key in ["scan"]:
+ n_input_channels = subspace.shape[1] # channels last
+ cnn = nn.Sequential(
+ nn.Conv1d(n_input_channels, 32, kernel_size=8, stride=4, padding=0),
+ nn.ReLU(),
+ nn.Conv1d(32, 64, kernel_size=4, stride=2, padding=0),
+ nn.ReLU(),
+ nn.Conv1d(64, 64, kernel_size=3, stride=1, padding=0),
+ nn.ReLU(),
+ nn.Flatten(),
+ )
+ test_tensor = th.zeros([subspace.shape[1], subspace.shape[0]])
+ with th.no_grad():
+ n_flatten = cnn(test_tensor[None]).shape[1]
+ fc = nn.Sequential(nn.Linear(n_flatten, feature_size), nn.ReLU())
+ extractors[key] = nn.Sequential(cnn, fc)
+ else:
+ raise ValueError("Unknown observation key: %s" % key)
+ total_concat_size += feature_size
+
+ self.extractors = nn.ModuleDict(extractors)
+
+ # Update the features dim manually
+ self._features_dim = total_concat_size
+
+ def forward(self, observations) -> th.Tensor:
+ encoded_tensor_list = []
+
+ # self.extractors contains nn.Modules that do all the processing.
+ for key, extractor in self.extractors.items():
+ if key in ["rgb", "highlight", "depth", "seg", "ins_seg"]:
+ observations[key] = observations[key].permute((0, 3, 1, 2))
+ elif key in ["scan"]:
+ observations[key] = observations[key].permute((0, 2, 1))
+ encoded_tensor_list.append(extractor(observations[key]))
+ # Return a (B, self._features_dim) PyTorch tensor, where B is batch dimension.
+ return th.cat(encoded_tensor_list, dim=1)
+
+
+def main():
+ config_file = "behavior_onboard_sensing.yaml"
+ tensorboard_log_dir = "log_dir"
+ num_cpu = 8
+
+ def make_env(rank: int, seed: int = 0) -> Callable:
+ def _init() -> BehaviorEnv:
+ env = BehaviorEnv(
+ config_file=os.path.join(igibson.example_config_path, config_file),
+ mode="headless",
+ action_timestep=1 / 30.0,
+ physics_timestep=1 / 300.0,
+ )
+ env.seed(seed + rank)
+ return env
+
+ set_random_seed(seed)
+ return _init
+
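+ # Create num_cpu parallel training environments, wrapped with VecMonitor to record episode statistics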
+ env = SubprocVecEnv([make_env(i) for i in range(num_cpu)])
+ env = VecMonitor(env)
+
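+ # A single separate environment is used for evaluation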
+ eval_env = BehaviorEnv(
+ config_file=os.path.join(igibson.example_config_path, config_file),
+ mode="headless",
+ action_timestep=1 / 30.0,
+ physics_timestep=1 / 300.0,
+ )
+
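+ # Use the custom multi-input feature extractor defined above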
+ policy_kwargs = dict(
+ features_extractor_class=CustomCombinedExtractor,
+ )
+ os.makedirs(tensorboard_log_dir, exist_ok=True)
+ model = PPO("MultiInputPolicy", env, verbose=1, tensorboard_log=tensorboard_log_dir, policy_kwargs=policy_kwargs)
+ print(model.policy)
+
+ # Random Agent, before training
+ mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10)
+ print(f"Before Training: Mean reward: {mean_reward} +/- {std_reward:.2f}")
+
+ model.learn(1000000)
+
+ mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=20)
+ print(f"After Training: Mean reward: {mean_reward} +/- {std_reward:.2f}")
+
+ model.save("ckpt")
+ del model
+
+ model = PPO.load("ckpt")
+ mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=20)
+ print(f"After Loading: Mean reward: {mean_reward} +/- {std_reward:.2f}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/learning/stable_baselines3_example.py b/igibson/examples/demo/stable_baselines3_example.py
similarity index 86%
rename from igibson/examples/learning/stable_baselines3_example.py
rename to igibson/examples/demo/stable_baselines3_example.py
index d6b9bf71a..263dc0f25 100644
--- a/igibson/examples/learning/stable_baselines3_example.py
+++ b/igibson/examples/demo/stable_baselines3_example.py
@@ -1,4 +1,3 @@
-import logging
import os
from typing import Callable
@@ -96,17 +95,10 @@ def forward(self, observations) -> th.Tensor:
def main():
- """
- Example to set a training process with Stable Baselines 3
- Loads a scene and starts the training process for a navigation task with images using PPO
- Saves the checkpoint and loads it again
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- config_file = "turtlebot_nav.yaml"
+ config_file = "turtlebot_point_nav.yaml"
tensorboard_log_dir = "log_dir"
- num_environments = 8
+ num_cpu = 8
- # Function callback to create environments
def make_env(rank: int, seed: int = 0) -> Callable:
def _init() -> iGibsonEnv:
env = iGibsonEnv(
@@ -121,11 +113,9 @@ def _init() -> iGibsonEnv:
set_random_seed(seed)
return _init
- # Multiprocess
- env = SubprocVecEnv([make_env(i) for i in range(num_environments)])
+ env = SubprocVecEnv([make_env(i) for i in range(num_cpu)])
env = VecMonitor(env)
- # Create a new environment for evaluation
eval_env = iGibsonEnv(
config_file=os.path.join(igibson.example_config_path, config_file),
mode="headless",
@@ -133,7 +123,6 @@ def _init() -> iGibsonEnv:
physics_timestep=1 / 120.0,
)
- # Obtain the arguments/parameters for the policy and create the PPO model
policy_kwargs = dict(
features_extractor_class=CustomCombinedExtractor,
)
@@ -141,25 +130,19 @@ def _init() -> iGibsonEnv:
model = PPO("MultiInputPolicy", env, verbose=1, tensorboard_log=tensorboard_log_dir, policy_kwargs=policy_kwargs)
print(model.policy)
- # Random Agent, evaluation before training
+ # Random Agent, before training
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10)
print(f"Before Training: Mean reward: {mean_reward} +/- {std_reward:.2f}")
- # Train the model for the given number of steps
model.learn(1000000)
- # Evaluate the policy after training
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=20)
print(f"After Training: Mean reward: {mean_reward} +/- {std_reward:.2f}")
- # Save the trained model and delete it
model.save("ckpt")
del model
- # Reload the trained model from file
model = PPO.load("ckpt")
-
- # Evaluate the trained model loaded from file
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=20)
print(f"After Loading: Mean reward: {mean_reward} +/- {std_reward:.2f}")
diff --git a/igibson/examples/object_states/temperature_demo.py b/igibson/examples/demo/temperature_example.py
similarity index 66%
rename from igibson/examples/object_states/temperature_demo.py
rename to igibson/examples/demo/temperature_example.py
index 472e99f8a..1d3424460 100644
--- a/igibson/examples/object_states/temperature_demo.py
+++ b/igibson/examples/demo/temperature_example.py
@@ -1,4 +1,3 @@
-import logging
import os
import numpy as np
@@ -8,27 +7,18 @@
from igibson.objects.articulated_object import URDFObject
from igibson.scenes.empty_scene import EmptyScene
from igibson.simulator import Simulator
+from igibson.utils.assets_utils import download_assets
+
+download_assets()
def main():
- """
- Demo of temperature change
- Loads a stove, a microwave and an oven, all toggled on, and five frozen apples
- The user can move the apples to see them change from frozen, to normal temperature, to cooked and burnt
- This demo also shows how to load objects ToggledOn and how to set the initial temperature of an object
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- s = Simulator(mode="gui_interactive", image_width=1280, image_height=720)
- # Set a better viewing direction
- s.viewer.initial_pos = [0.0, -0.7, 2.1]
- s.viewer.initial_view_direction = [0.0, 0.8, -0.7]
- s.viewer.reset_viewer()
+ s = Simulator(mode="iggui", image_width=1280, image_height=720)
try:
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
+ scene = EmptyScene()
s.import_scene(scene)
- # Load stove ON
stove_dir = os.path.join(igibson.ig_dataset_path, "objects/stove/101930/")
stove_urdf = os.path.join(stove_dir, "101930.urdf")
stove = URDFObject(stove_urdf, name="stove", category="stove", model_path=stove_dir)
@@ -36,7 +26,6 @@ def main():
stove.set_position([0, 0, 0.782])
stove.states[object_states.ToggledOn].set_value(True)
- # Load microwave ON
microwave_dir = os.path.join(igibson.ig_dataset_path, "objects/microwave/7128/")
microwave_urdf = os.path.join(microwave_dir, "7128.urdf")
microwave = URDFObject(microwave_urdf, name="microwave", category="microwave", model_path=microwave_dir)
@@ -44,7 +33,6 @@ def main():
microwave.set_position([2, 0, 0.401])
microwave.states[object_states.ToggledOn].set_value(True)
- # Load oven ON
oven_dir = os.path.join(igibson.ig_dataset_path, "objects/oven/7120/")
oven_urdf = os.path.join(oven_dir, "7120.urdf")
oven = URDFObject(oven_urdf, name="oven", category="oven", model_path=oven_dir)
@@ -52,14 +40,12 @@ def main():
oven.set_position([-2, 0, 0.816])
oven.states[object_states.ToggledOn].set_value(True)
- # Load tray
tray_dir = os.path.join(igibson.ig_dataset_path, "objects/tray/tray_000/")
tray_urdf = os.path.join(tray_dir, "tray_000.urdf")
tray = URDFObject(tray_urdf, name="tray", category="tray", model_path=tray_dir, scale=np.array([0.1, 0.1, 0.1]))
s.import_object(tray)
tray.set_position([0, 0, 1.55])
- # Load fridge
fridge_dir = os.path.join(igibson.ig_dataset_path, "objects/fridge/12252/")
fridge_urdf = os.path.join(fridge_dir, "12252.urdf")
fridge = URDFObject(
@@ -77,34 +63,32 @@ def main():
s.import_object(fridge)
fridge.set_position_orientation([-1, -3, 0.75], [1, 0, 0, 0])
- # Load 5 apples
apple_dir = os.path.join(igibson.ig_dataset_path, "objects/apple/00_0/")
apple_urdf = os.path.join(apple_dir, "00_0.urdf")
+
apples = []
for i in range(5):
apple = URDFObject(apple_urdf, name="apple", category="apple", model_path=apple_dir)
s.import_object(apple)
apple.set_position([0, i * 0.05, 1.65])
apples.append(apple)
+
s.step()
- # Set initial temperature of the apples to -50 degrees Celsius
for apple in apples:
apple.states[object_states.Temperature].set_value(-50)
+ # Run simulation indefinitely
while True:
s.step()
- for idx, apple in enumerate(apples):
- logging.info(
- "Apple %d Temperature: %.2f. Frozen: %r. Cooked: %r. Burnt: %r."
- % (
- idx + 1,
- apple.states[object_states.Temperature].get_value(),
- apple.states[object_states.Frozen].get_value(),
- apple.states[object_states.Cooked].get_value(),
- apple.states[object_states.Burnt].get_value(),
- )
+ print(
+ "Apple Temperature: %.2f. Frozen: %r. Cooked: %r. Burnt: %r."
+ % (
+ apple.states[object_states.Temperature].get_value(),
+ apple.states[object_states.Frozen].get_value(),
+ apple.states[object_states.Cooked].get_value(),
+ apple.states[object_states.Burnt].get_value(),
)
- print()
+ )
finally:
s.disconnect()
diff --git a/igibson/examples/demo/texture_change_example.py b/igibson/examples/demo/texture_change_example.py
new file mode 100644
index 000000000..0d11250e1
--- /dev/null
+++ b/igibson/examples/demo/texture_change_example.py
@@ -0,0 +1,42 @@
+import os
+
+import igibson
+from igibson import object_states
+from igibson.objects.articulated_object import URDFObject
+from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
+from igibson.render.profiler import Profiler
+from igibson.scenes.empty_scene import EmptyScene
+from igibson.simulator import Simulator
+from igibson.utils.assets_utils import get_ig_model_path
+from igibson.utils.utils import parse_config
+
+
+def main():
+ config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_demo.yaml"))
+ settings = MeshRendererSettings(enable_shadow=False, msaa=False, optimized=True)
+ s = Simulator(mode="gui", image_width=512, image_height=512, rendering_settings=settings)
+
+ scene = EmptyScene()
+ s.import_scene(scene)
+
+ apple_dir = get_ig_model_path("apple", "00_0")
+ apple_urdf = os.path.join(get_ig_model_path("apple", "00_0"), "00_0.urdf")
+
+ apple = URDFObject(apple_urdf, name="apple", category="apple", model_path=apple_dir)
+
+ s.import_object(apple)
+ apple.set_position([0, 0, 0.2])
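+ # Gradually raise the apple's temperature; once the Cooked state becomes True, the apple's texture is expected to change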
+ temp = 0
+ for i in range(10000):
+ if i % 10 == 0:
+ temp += 1
+ print(temp)
+ apple.states[object_states.Temperature].set_value(temp)
+ print(apple.states[object_states.Cooked].get_value())
+ with Profiler("Simulator step"):
+ s.step()
+ s.disconnect()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/examples/robots/trav_map_vis_example.py b/igibson/examples/demo/trav_map_vis_example.py
similarity index 100%
rename from igibson/examples/robots/trav_map_vis_example.py
rename to igibson/examples/demo/trav_map_vis_example.py
diff --git a/igibson/examples/vr/data_save_replay/vr_sr.py b/igibson/examples/demo/vr_demos/data_save_replay/vr_sr.py
similarity index 97%
rename from igibson/examples/vr/data_save_replay/vr_sr.py
rename to igibson/examples/demo/vr_demos/data_save_replay/vr_sr.py
index 6d0c5524e..b1fd68d43 100644
--- a/igibson/examples/vr/data_save_replay/vr_sr.py
+++ b/igibson/examples/demo/vr_demos/data_save_replay/vr_sr.py
@@ -75,7 +75,7 @@ def run_action_sr(mode):
scene = InteractiveIndoorScene(
"Rs_int", load_object_categories=["walls", "floors", "ceilings"], load_room_types=["kitchen"]
)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
# Note: uncomment these lines during replay to see the scene from an external perspective
@@ -88,7 +88,7 @@ def run_action_sr(mode):
# Data replay uses constraints during both save and replay modes
# Note: set show_visual_head to True upon replay to see the VR head
bvr_robot = BehaviorRobot(s, show_visual_head=False)
- s.import_robot(bvr_robot)
+ s.import_behavior_robot(bvr_robot)
s.register_main_vr_robot(bvr_robot)
bvr_robot.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1])
@@ -111,8 +111,8 @@ def run_action_sr(mode):
fpath = item[0]
pos = item[1]
orn = item[2]
- item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(item_ob)
+ item_ob = ArticulatedObject(fpath, scale=1)
+ s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False)
item_ob.set_position(pos)
item_ob.set_orientation(orn)
@@ -185,7 +185,7 @@ def run_action_sr(mode):
# Get relevant VR action data and update VR agent
bvr_robot.update(log_reader.get_agent_action("vr_robot"))
- # vr_agent.links['eye'].show_eye()
+ # vr_agent.parts['eye'].show_eye()
# Print debugging information
if PRINT_PB:
diff --git a/igibson/examples/vr/in_development/vr_body_tracker_test.py b/igibson/examples/demo/vr_demos/in_development/vr_body_tracker_test.py
similarity index 92%
rename from igibson/examples/vr/in_development/vr_body_tracker_test.py
rename to igibson/examples/demo/vr_demos/in_development/vr_body_tracker_test.py
index ba99444ab..fb7abb0bb 100644
--- a/igibson/examples/vr/in_development/vr_body_tracker_test.py
+++ b/igibson/examples/demo/vr_demos/in_development/vr_body_tracker_test.py
@@ -10,7 +10,7 @@
def main():
s = Simulator(mode="vr", rendering_settings=MeshRendererSettings(enable_shadow=True, optimized=True))
scene = EmptyScene()
- s.import_scene(scene)
+ s.import_scene(scene, render_floor_plane=True)
vr_agent = BehaviorRobot(s)
# Main simulation loop
diff --git a/igibson/examples/vr/in_development/vr_button_mapping.py b/igibson/examples/demo/vr_demos/in_development/vr_button_mapping.py
similarity index 95%
rename from igibson/examples/vr/in_development/vr_button_mapping.py
rename to igibson/examples/demo/vr_demos/in_development/vr_button_mapping.py
index dcf0f74d5..4ce2c73cc 100644
--- a/igibson/examples/vr/in_development/vr_button_mapping.py
+++ b/igibson/examples/demo/vr_demos/in_development/vr_button_mapping.py
@@ -15,7 +15,7 @@
def main():
s = Simulator(mode="vr", rendering_settings=MeshRendererSettings(enable_shadow=True, optimized=True))
scene = EmptyScene()
- s.import_scene(scene)
+ s.import_scene(scene, render_floor_plane=True)
# Main simulation loop
while True:
diff --git a/igibson/examples/vr/in_development/vr_cleaning_demo.py b/igibson/examples/demo/vr_demos/in_development/vr_cleaning_demo.py
similarity index 90%
rename from igibson/examples/vr/in_development/vr_cleaning_demo.py
rename to igibson/examples/demo/vr_demos/in_development/vr_cleaning_demo.py
index fe0376ab4..ddd518ac9 100644
--- a/igibson/examples/vr/in_development/vr_cleaning_demo.py
+++ b/igibson/examples/demo/vr_demos/in_development/vr_cleaning_demo.py
@@ -5,6 +5,7 @@
import igibson
from igibson import object_states
+from igibson.object_states.factory import prepare_object_states
from igibson.objects.ycb_object import YCBObject
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
@@ -44,7 +45,7 @@
if VIEWER_MANIP:
s = Simulator(
- mode="gui_interactive",
+ mode="iggui",
image_width=512,
image_height=512,
rendering_settings=vr_rendering_settings,
@@ -54,14 +55,16 @@
s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=vr_settings)
scene = InteractiveIndoorScene("Rs_int", texture_randomization=False, object_randomization=False)
-s.import_scene(scene)
+s.import_ig_scene(scene)
if not VIEWER_MANIP:
vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER, normal_color=False)
-block = YCBObject(name="036_wood_block", abilities={"soakable": {}, "cleaning_tool": {}})
+block = YCBObject(name="036_wood_block")
s.import_object(block)
block.set_position([1, 1, 1.8])
+block.abilities = ["soakable", "cleaning_tool"]
+prepare_object_states(block, abilities={"soakable": {}, "cleaning_tool": {}})
# Set everything that can go dirty.
dirtyable_objects = set(
diff --git a/igibson/examples/vr/in_development/vr_hand_dex_benchmark.py b/igibson/examples/demo/vr_demos/in_development/vr_hand_dex_benchmark.py
similarity index 93%
rename from igibson/examples/vr/in_development/vr_hand_dex_benchmark.py
rename to igibson/examples/demo/vr_demos/in_development/vr_hand_dex_benchmark.py
index 154024890..cc0b3ee73 100644
--- a/igibson/examples/vr/in_development/vr_hand_dex_benchmark.py
+++ b/igibson/examples/demo/vr_demos/in_development/vr_hand_dex_benchmark.py
@@ -68,7 +68,7 @@ def main():
)
# scene.load_object_categories(benchmark_names)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER)
@@ -144,13 +144,11 @@ def main():
fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name]
for i in range(len(masses)):
if obj_type == "ycb":
- handle = YCBObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
+ handle = YCBObject(fpath, scale=scale)
elif obj_type == "pb":
- handle = ArticulatedObject(
- fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False}
- )
+ handle = ArticulatedObject(fpath, scale=scale)
- s.import_object(handle)
+ s.import_object(handle, use_pbr=False, use_pbr_mapping=False)
# Calculate new position along spacing vector
new_pos = (
pos[0] + space_vec[0] * space_val * i,
@@ -159,8 +157,8 @@ def main():
)
handle.set_position(new_pos)
handle.set_orientation(orn)
- p.changeDynamics(handle.get_body_id(), -1, mass=masses[i])
- minBox, maxBox = p.getAABB(handle.get_body_id())
+ p.changeDynamics(handle.body_id, -1, mass=masses[i])
+ minBox, maxBox = p.getAABB(handle.body_id)
dims = [maxBox[i] - minBox[i] for i in range(3)]
print("Name {} and masses: {}".format(name, masses))
print("XYZ dimensions: {}".format(dims))
@@ -286,14 +284,14 @@ def main():
orn = item["orn"]
scale = item["scale"]
mass = item["mass"]
- item_ob = ArticulatedObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(item_ob)
+ item_ob = ArticulatedObject(fpath, scale=scale)
+ s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False)
item_ob.set_position(pos)
item_ob.set_orientation(orn)
objs_loaded.append(item_ob)
- minBox, maxBox = p.getAABB(item_ob.get_body_id())
+ minBox, maxBox = p.getAABB(item_ob.body_id)
dims = [maxBox[i] - minBox[i] for i in range(3)]
- p.changeDynamics(item_ob.get_body_id(), -1, mass=mass)
+ p.changeDynamics(item_ob.body_id, -1, mass=mass)
print("Name {} and mass: {}".format(it_name, mass))
print("XYZ dimensions: {}".format(dims))
@@ -317,7 +315,7 @@ def main():
vr_agent.apply_action()
if show_overlay:
- ag_candidate_data = vr_agent.links["right_hand"].candidate_data
+ ag_candidate_data = vr_agent.parts["right_hand"].candidate_data
if ag_candidate_data:
t = ""
for bid, link, dist in ag_candidate_data:
diff --git a/igibson/examples/vr/in_development/vr_hand_speed_benchmark.py b/igibson/examples/demo/vr_demos/in_development/vr_hand_speed_benchmark.py
similarity index 96%
rename from igibson/examples/vr/in_development/vr_hand_speed_benchmark.py
rename to igibson/examples/demo/vr_demos/in_development/vr_hand_speed_benchmark.py
index e78ed0b05..b08de0f98 100644
--- a/igibson/examples/vr/in_development/vr_hand_speed_benchmark.py
+++ b/igibson/examples/demo/vr_demos/in_development/vr_hand_speed_benchmark.py
@@ -55,7 +55,7 @@ def main():
if VIEWER_MANIP:
s = Simulator(
- mode="gui_interactive",
+ mode="iggui",
image_width=512,
image_height=512,
rendering_settings=vr_rendering_settings,
@@ -65,7 +65,7 @@ def main():
scene = InteractiveIndoorScene("Rs_int")
scene._set_first_n_objects(2)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
if not VIEWER_MANIP:
@@ -89,8 +89,8 @@ def main():
fpath = item[0]
pos = item[1]
orn = item[2]
- item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(item_ob)
+ item_ob = ArticulatedObject(fpath, scale=1)
+ s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False)
item_ob.set_position(pos)
item_ob.set_orientation(orn)
diff --git a/igibson/examples/vr/in_development/vr_playground.py b/igibson/examples/demo/vr_demos/in_development/vr_playground.py
similarity index 93%
rename from igibson/examples/vr/in_development/vr_playground.py
rename to igibson/examples/demo/vr_demos/in_development/vr_playground.py
index ed4a3734d..e292ceb93 100644
--- a/igibson/examples/vr/in_development/vr_playground.py
+++ b/igibson/examples/demo/vr_demos/in_development/vr_playground.py
@@ -65,7 +65,7 @@ def main():
# Turn this on when debugging to speed up loading
if LOAD_PARTIAL:
scene._set_first_n_objects(10)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
if not VR_MODE:
camera_pose = np.array([0, -3, 1.2])
@@ -84,10 +84,10 @@ def main():
mass_list = [5, 10, 100, 500]
mustard_start = [-1, 1.55, 1.2]
for i in range(len(mass_list)):
- mustard = YCBObject("006_mustard_bottle", renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(mustard)
+ mustard = YCBObject("006_mustard_bottle")
+ s.import_object(mustard, use_pbr=False, use_pbr_mapping=False, shadow_caster=True)
mustard.set_position([mustard_start[0] + i * 0.2, mustard_start[1], mustard_start[2]])
- p.changeDynamics(mustard.get_body_id(), -1, mass=mass_list[i])
+ p.changeDynamics(mustard.body_id, -1, mass=mass_list[i])
# Main simulation loop
while True:
diff --git a/igibson/examples/vr/in_development/vr_sample_hud.py b/igibson/examples/demo/vr_demos/in_development/vr_sample_hud.py
similarity index 94%
rename from igibson/examples/vr/in_development/vr_sample_hud.py
rename to igibson/examples/demo/vr_demos/in_development/vr_sample_hud.py
index cb0f35585..848e7401a 100644
--- a/igibson/examples/vr/in_development/vr_sample_hud.py
+++ b/igibson/examples/demo/vr_demos/in_development/vr_sample_hud.py
@@ -70,7 +70,7 @@ def main():
s = Simulator(mode="vr", rendering_settings=vr_rendering_settings)
scene = InteractiveIndoorScene("Rs_int")
scene._set_obj_names_to_load(benchmark_names)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER)
@@ -146,13 +146,11 @@ def main():
fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name]
for i in range(len(masses)):
if obj_type == "ycb":
- handle = YCBObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
+ handle = YCBObject(fpath, scale=scale)
elif obj_type == "pb":
- handle = ArticulatedObject(
- fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False}
- )
+ handle = ArticulatedObject(fpath, scale=scale)
- s.import_object(handle)
+ s.import_object(handle, use_pbr=False, use_pbr_mapping=False)
# Calculate new position along spacing vector
new_pos = (
pos[0] + space_vec[0] * space_val * i,
@@ -161,7 +159,7 @@ def main():
)
handle.set_position(new_pos)
handle.set_orientation(orn)
- p.changeDynamics(handle.get_body_id(), -1, mass=masses[i])
+ p.changeDynamics(handle.body_id, -1, mass=masses[i])
title = s.add_vr_overlay_text(
text_data="Welcome to iGibson VR!", font_size=85, font_style="Bold", color=[0, 0, 0.5], pos=[150, 900]
diff --git a/igibson/examples/vr/in_development/vr_tracker_test.py b/igibson/examples/demo/vr_demos/in_development/vr_tracker_test.py
similarity index 94%
rename from igibson/examples/vr/in_development/vr_tracker_test.py
rename to igibson/examples/demo/vr_demos/in_development/vr_tracker_test.py
index 8b318b1ee..892d1d507 100644
--- a/igibson/examples/vr/in_development/vr_tracker_test.py
+++ b/igibson/examples/demo/vr_demos/in_development/vr_tracker_test.py
@@ -14,7 +14,7 @@
def main():
s = Simulator(mode="vr", rendering_settings=MeshRendererSettings(enable_shadow=True, optimized=True))
scene = EmptyScene()
- s.import_scene(scene)
+ s.import_scene(scene, render_floor_plane=True)
# Main simulation loop
while True:
diff --git a/igibson/examples/vr/muvr/muvr_demo.py b/igibson/examples/demo/vr_demos/muvr/muvr_demo.py
similarity index 95%
rename from igibson/examples/vr/muvr/muvr_demo.py
rename to igibson/examples/demo/vr_demos/muvr/muvr_demo.py
index 10096fe67..0cb66eb5e 100644
--- a/igibson/examples/vr/muvr/muvr_demo.py
+++ b/igibson/examples/demo/vr_demos/muvr/muvr_demo.py
@@ -68,7 +68,7 @@ def run_muvr(mode="server", host="localhost", port="8885"):
scene = InteractiveIndoorScene("Rs_int")
if LOAD_PARTIAL:
scene._set_first_n_objects(10)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
# Default camera for non-VR MUVR users
if not vr_settings.use_vr:
@@ -85,10 +85,10 @@ def run_muvr(mode="server", host="localhost", port="8885"):
mass_list = [5, 10, 20, 30]
mustard_start = [-1, 1.55, 1.2]
for i in range(len(mass_list)):
- mustard = YCBObject("006_mustard_bottle", renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(mustard)
+ mustard = YCBObject("006_mustard_bottle")
+ s.import_object(mustard, use_pbr=False, use_pbr_mapping=False, shadow_caster=True)
mustard.set_position([mustard_start[0] + i * 0.2, mustard_start[1], mustard_start[2]])
- p.changeDynamics(mustard.get_body_id(), -1, mass=mass_list[i])
+ p.changeDynamics(mustard.body_id, -1, mass=mass_list[i])
# Start the two agents at different points so they don't collide upon entering the scene
if vr_settings.use_vr:
@@ -102,7 +102,7 @@ def run_muvr(mode="server", host="localhost", port="8885"):
vr_client = IGVRClient(host, port)
vr_client.register_data(s, client_agent)
# Disconnect pybullet since the client only renders
- s.disconnect(release_renderer=False)
+ s.disconnect_pybullet()
# Main networking loop
while True:
diff --git a/igibson/examples/vr/robot_embodiment/vr_demo_robot_explore.py b/igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_explore.py
similarity index 87%
rename from igibson/examples/vr/robot_embodiment/vr_demo_robot_explore.py
rename to igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_explore.py
index 074b38a76..268a2d778 100644
--- a/igibson/examples/vr/robot_embodiment/vr_demo_robot_explore.py
+++ b/igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_explore.py
@@ -9,6 +9,7 @@
from igibson.objects.ycb_object import YCBObject
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
+from igibson.robots.fetch_vr_robot import FetchVR
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
@@ -46,11 +47,7 @@
s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=VrSettings(vr_fps=VR_FPS))
else:
s = Simulator(
- mode="gui_interactive",
- image_width=960,
- image_height=720,
- device_idx=0,
- rendering_settings=vr_rendering_settings,
+ mode="iggui", image_width=960, image_height=720, device_idx=0, rendering_settings=vr_rendering_settings
)
s.viewer.min_cam_z = 1.0
@@ -58,7 +55,7 @@
# Turn this on when debugging to speed up loading
if LOAD_PARTIAL:
scene._set_first_n_objects(10)
-s.import_scene(scene)
+s.import_ig_scene(scene)
if not VR_MODE:
camera_pose = np.array([0, -3, 1.2])
@@ -73,10 +70,10 @@
mass_list = [5, 10, 100, 500]
mustard_start = [-1, 1.55, 1.2]
for i in range(len(mass_list)):
- mustard = YCBObject("006_mustard_bottle", renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(mustard)
+ mustard = YCBObject("006_mustard_bottle")
+ s.import_object(mustard, use_pbr=False, use_pbr_mapping=False, shadow_caster=True)
mustard.set_position([mustard_start[0] + i * 0.2, mustard_start[1], mustard_start[2]])
- p.changeDynamics(mustard.get_body_id(), -1, mass=mass_list[i])
+ p.changeDynamics(mustard.body_id, -1, mass=mass_list[i])
# Main simulation loop
while True:
diff --git a/igibson/examples/vr/robot_embodiment/vr_demo_robot_grasping.py b/igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_grasping.py
similarity index 92%
rename from igibson/examples/vr/robot_embodiment/vr_demo_robot_grasping.py
rename to igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_grasping.py
index bf41a2156..8e55e5006 100644
--- a/igibson/examples/vr/robot_embodiment/vr_demo_robot_grasping.py
+++ b/igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_grasping.py
@@ -11,6 +11,7 @@
from igibson.objects.ycb_object import YCBObject
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
+from igibson.robots.fetch_vr_robot import FetchVR
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
@@ -46,17 +47,13 @@
s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=VrSettings(vr_fps=VR_FPS))
else:
s = Simulator(
- mode="gui_interactive",
- image_width=960,
- image_height=720,
- device_idx=0,
- rendering_settings=vr_rendering_settings,
+ mode="iggui", image_width=960, image_height=720, device_idx=0, rendering_settings=vr_rendering_settings
)
s.viewer.min_cam_z = 1.0
scene = InteractiveIndoorScene("Rs_int")
scene._set_first_n_objects(2)
-s.import_scene(scene)
+s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
if not VR_MODE:
@@ -86,8 +83,8 @@
fpath = item[0]
pos = item[1]
orn = item[2]
- item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(item_ob)
+ item_ob = ArticulatedObject(fpath, scale=1)
+ s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False)
item_ob.set_position(pos)
item_ob.set_orientation(orn)
diff --git a/igibson/examples/vr/test/muvr_lag_test.py b/igibson/examples/demo/vr_demos/test/muvr_lag_test.py
similarity index 100%
rename from igibson/examples/vr/test/muvr_lag_test.py
rename to igibson/examples/demo/vr_demos/test/muvr_lag_test.py
diff --git a/igibson/examples/vr/test/vr_condition_switch.py b/igibson/examples/demo/vr_demos/test/vr_condition_switch.py
similarity index 92%
rename from igibson/examples/vr/test/vr_condition_switch.py
rename to igibson/examples/demo/vr_demos/test/vr_condition_switch.py
index 039f57baf..a861b96ad 100644
--- a/igibson/examples/vr/test/vr_condition_switch.py
+++ b/igibson/examples/demo/vr_demos/test/vr_condition_switch.py
@@ -41,7 +41,7 @@ def main():
vr_settings = VrSettings()
s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=vr_settings)
scene = InteractiveIndoorScene("Rs_int")
- s.import_scene(scene)
+ s.import_ig_scene(scene)
# Create a BehaviorRobot and it will handle all initialization and importing under-the-hood
# Change USE_GRIPPER to switch between the BRHand and the BRGripper (see robots/behavior_robot.py for more details)
@@ -57,10 +57,10 @@ def main():
mass_list = [5, 10, 100, 500]
mustard_start = [-1, 1.55, 1.2]
for i in range(len(mass_list)):
- mustard = YCBObject("006_mustard_bottle", renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(mustard)
+ mustard = YCBObject("006_mustard_bottle")
+ s.import_object(mustard, use_pbr=False, use_pbr_mapping=False, shadow_caster=True)
mustard.set_position([mustard_start[0] + i * 0.2, mustard_start[1], mustard_start[2]])
- p.changeDynamics(mustard.get_body_id(), -1, mass=mass_list[i])
+ p.changeDynamics(mustard.body_id, -1, mass=mass_list[i])
# Main simulation loop
while True:
diff --git a/igibson/examples/vr/test/vr_hand_geom_vis.py b/igibson/examples/demo/vr_demos/test/vr_hand_geom_vis.py
similarity index 100%
rename from igibson/examples/vr/test/vr_hand_geom_vis.py
rename to igibson/examples/demo/vr_demos/test/vr_hand_geom_vis.py
diff --git a/igibson/examples/vr/test/vr_overlay_color_test.py b/igibson/examples/demo/vr_demos/test/vr_overlay_color_test.py
similarity index 94%
rename from igibson/examples/vr/test/vr_overlay_color_test.py
rename to igibson/examples/demo/vr_demos/test/vr_overlay_color_test.py
index 23ed6723b..3ce2cdb61 100644
--- a/igibson/examples/vr/test/vr_overlay_color_test.py
+++ b/igibson/examples/demo/vr_demos/test/vr_overlay_color_test.py
@@ -81,7 +81,7 @@ def main():
)
# scene.load_object_categories(benchmark_names)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER, normal_color=True)
@@ -157,13 +157,11 @@ def main():
fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name]
for i in range(len(masses)):
if obj_type == "ycb":
- handle = YCBObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
+ handle = YCBObject(fpath, scale=scale)
elif obj_type == "pb":
- handle = ArticulatedObject(
- fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False}
- )
+ handle = ArticulatedObject(fpath, scale=scale)
- s.import_object(handle)
+ s.import_object(handle, use_pbr=False, use_pbr_mapping=False)
# Calculate new position along spacing vector
new_pos = (
pos[0] + space_vec[0] * space_val * i,
@@ -172,7 +170,7 @@ def main():
)
handle.set_position(new_pos)
handle.set_orientation(orn)
- p.changeDynamics(handle.get_body_id(), -1, mass=masses[i])
+ p.changeDynamics(handle.body_id, -1, mass=masses[i])
# Text position/size is described in percentage of axes in screen space
wrap_scroll_text = s.add_vr_overlay_text(
diff --git a/igibson/examples/vr/test/vr_scroll_wrap_text_test.py b/igibson/examples/demo/vr_demos/test/vr_scroll_wrap_text_test.py
similarity index 94%
rename from igibson/examples/vr/test/vr_scroll_wrap_text_test.py
rename to igibson/examples/demo/vr_demos/test/vr_scroll_wrap_text_test.py
index ea6b930a8..5d839a8d0 100644
--- a/igibson/examples/vr/test/vr_scroll_wrap_text_test.py
+++ b/igibson/examples/demo/vr_demos/test/vr_scroll_wrap_text_test.py
@@ -81,7 +81,7 @@ def main():
)
# scene.load_object_categories(benchmark_names)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER, normal_color=True)
@@ -157,13 +157,11 @@ def main():
fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name]
for i in range(len(masses)):
if obj_type == "ycb":
- handle = YCBObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
+ handle = YCBObject(fpath, scale=scale)
elif obj_type == "pb":
- handle = ArticulatedObject(
- fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False}
- )
+ handle = ArticulatedObject(fpath, scale=scale)
- s.import_object(handle)
+ s.import_object(handle, use_pbr=False, use_pbr_mapping=False)
# Calculate new position along spacing vector
new_pos = (
pos[0] + space_vec[0] * space_val * i,
@@ -172,7 +170,7 @@ def main():
)
handle.set_position(new_pos)
handle.set_orientation(orn)
- p.changeDynamics(handle.get_body_id(), -1, mass=masses[i])
+ p.changeDynamics(handle.body_id, -1, mass=masses[i])
# Text position/size is described in percentage of axes in screen space
wrap_scroll_text = s.add_vr_overlay_text(
diff --git a/igibson/examples/vr/vr_gaze_test.py b/igibson/examples/demo/vr_demos/vr_gaze_test.py
similarity index 92%
rename from igibson/examples/vr/vr_gaze_test.py
rename to igibson/examples/demo/vr_demos/vr_gaze_test.py
index 7ca1555ad..6ebcf4789 100644
--- a/igibson/examples/vr/vr_gaze_test.py
+++ b/igibson/examples/demo/vr_demos/vr_gaze_test.py
@@ -44,7 +44,7 @@ def main():
scene = InteractiveIndoorScene(
"Rs_int", load_object_categories=["walls", "floors", "ceilings"], load_room_types=["kitchen"]
)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
objects = [
@@ -65,8 +65,8 @@ def main():
fpath = item[0]
pos = item[1]
orn = item[2]
- item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(item_ob)
+ item_ob = ArticulatedObject(fpath, scale=1)
+ s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False)
item_ob.set_position(pos)
item_ob.set_orientation(orn)
@@ -84,15 +84,13 @@ def main():
obj.set_position_orientation([1.1, 0.300000, 1.0], [0, 0, 0, 1])
bvr_robot = BehaviorRobot(s)
- s.import_robot(bvr_robot)
+ s.import_behavior_robot(bvr_robot)
s.register_main_vr_robot(bvr_robot)
bvr_robot.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1])
# Represents gaze
- eye_marker = ArticulatedObject(
- "sphere_small.urdf", scale=2, renderer_params={"use_pbr": False, "use_pbr_mapping": False}
- )
- s.import_object(eye_marker)
+ eye_marker = ArticulatedObject("sphere_small.urdf", scale=2)
+ s.import_object(eye_marker, use_pbr=False, use_pbr_mapping=False)
gaze_max_dist = 1.5
# Main simulation loop
diff --git a/igibson/examples/vr/vr_simple_demo.py b/igibson/examples/demo/vr_demos/vr_simple_demo.py
similarity index 95%
rename from igibson/examples/vr/vr_simple_demo.py
rename to igibson/examples/demo/vr_demos/vr_simple_demo.py
index 8db68b37f..cb0071b04 100644
--- a/igibson/examples/vr/vr_simple_demo.py
+++ b/igibson/examples/demo/vr_demos/vr_simple_demo.py
@@ -43,7 +43,7 @@ def main():
scene = InteractiveIndoorScene(
"Rs_int", load_object_categories=["walls", "floors", "ceilings"], load_room_types=["kitchen"]
)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
objects = [
@@ -64,8 +64,8 @@ def main():
fpath = item[0]
pos = item[1]
orn = item[2]
- item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
- s.import_object(item_ob)
+ item_ob = ArticulatedObject(fpath, scale=1)
+ s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False)
item_ob.set_position(pos)
item_ob.set_orientation(orn)
@@ -83,7 +83,7 @@ def main():
obj.set_position_orientation([1.1, 0.300000, 1.0], [0, 0, 0, 1])
bvr_robot = BehaviorRobot(s)
- s.import_robot(bvr_robot)
+ s.import_behavior_robot(bvr_robot)
s.register_main_vr_robot(bvr_robot)
bvr_robot.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1])
diff --git a/igibson/examples/web_ui/sampling_feedback_test.py b/igibson/examples/demo/web_ui/sampling_feedback_test.py
similarity index 100%
rename from igibson/examples/web_ui/sampling_feedback_test.py
rename to igibson/examples/demo/web_ui/sampling_feedback_test.py
diff --git a/igibson/examples/web_ui/sampling_ui.py b/igibson/examples/demo/web_ui/sampling_ui.py
similarity index 77%
rename from igibson/examples/web_ui/sampling_ui.py
rename to igibson/examples/demo/web_ui/sampling_ui.py
index 46c8b14d1..51df1a9f6 100644
--- a/igibson/examples/web_ui/sampling_ui.py
+++ b/igibson/examples/demo/web_ui/sampling_ui.py
@@ -17,11 +17,11 @@
from flask_cors import CORS
import igibson
-from igibson.envs.igibson_env import iGibsonEnv
+from igibson.activity.activity_base import iGBEHAVIORActivityInstance
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.simulator import Simulator
-from igibson.utils.utils import parse_config, restoreState
+from igibson.utils.utils import parse_config
interactive = True
NUM_REQUIRED_SUCCESSFUL_SCENES = 3
@@ -124,7 +124,7 @@ def sample(self, behavior_activity, bddl, blocking=True):
:param bddl (str): the bddl being sampled in the environment
:param blocking (bool): whether to wait for the result
- :return (bool, str): (success, feedback) from the sampling process
+ :return (bool, dict): (success, feedback) from the sampling process
"""
self.last_active_time = time.time()
promise = self.call("sample", behavior_activity, bddl)
@@ -213,6 +213,7 @@ def __init__(self):
self.s = Simulator(mode="headless", image_width=400, image_height=400, rendering_settings=settings)
scene = StaticIndoorScene("Rs")
self.s.import_scene(scene)
+ # self.s.import_ig_scene(scene)
def step(self, a):
self.s.step()
@@ -225,67 +226,116 @@ def close(self):
class ToyEnvInt(object):
def __init__(self, scene="Rs_int"):
- config_file = os.path.join(igibson.example_config_path, "behavior_vr.yaml")
- env_config = parse_config(config_file)
- env_config["scene_id"] = scene
- env_config["task"] = "trivial"
- env_config["task_id"] = 0
- env_config["online_sampling"] = True
- env_config["load_clutter"] = False
+ bddl.set_backend("iGibson")
+ self.task = iGBEHAVIORActivityInstance("trivial", activity_definition=0)
+
settings = MeshRendererSettings(texture_scale=0.01)
- self.env = iGibsonEnv(config_file=env_config, mode="headless", rendering_settings=settings)
+ simulator = Simulator(mode="headless", image_width=400, image_height=400, rendering_settings=settings)
+ self.task.initialize_simulator(
+ simulator=simulator,
+ scene_id=scene,
+ mode="headless",
+ load_clutter=False,
+ should_debug_sampling=False,
+ scene_kwargs={},
+ online_sampling=True,
+ )
self.state_id = p.saveState()
self.num_body_ids = p.getNumBodies()
- self.num_particle_systems = len(self.env.simulator.particle_systems)
+ self.num_particle_systems = len(self.task.simulator.particle_systems)
def step(self, a):
pass
def restore_scene(self):
- for sim_obj in self.env.task.newly_added_objects:
- self.env.scene.remove_object(sim_obj)
+ for sim_obj in self.task.newly_added_objects:
+ self.task.scene.remove_object(sim_obj)
- self.env.simulator.particle_systems = self.env.simulator.particle_systems[: self.num_particle_systems]
+ self.task.simulator.particle_systems = self.task.simulator.particle_systems[: self.num_particle_systems]
for body_id in range(self.num_body_ids, p.getNumBodies()):
p.removeBody(body_id)
- restoreState(self.state_id)
+ p.restoreState(self.state_id)
def sample(self, behavior_activity, bddl):
try:
- self.env.task.update_problem(behavior_activity, 0, predefined_problem=bddl)
+ self.task.update_problem(behavior_activity, "tester", predefined_problem=bddl)
+ self.task.object_scope["agent.n.01_1"] = self.task.agent.parts["body"]
except UncontrolledCategoryError:
accept_scene = False
- feedback = "Goal state has uncontrolled categories."
+ feedback = {
+ "init_success": "no",
+ "goal_success": "no",
+ "init_feedback": "Cannot check until goal state is fixed.",
+ "goal_feedback": "Goal state has uncontrolled categories.",
+ }
return accept_scene, feedback
except UnsupportedPredicateError as e:
accept_scene = False
- feedback = f"We don't yet support the [{e.predicate}] adjective for any objects. We will soon!"
+ feedback = {
+ "init_success": "no",
+ "goal_success": "no",
+ "init_feedback": f"We don't yet support the [{e.predicate}] adjective for any objects. We will soon!",
+ "goal_feedback": "",
+ }
return accept_scene, feedback
except AssertionError as message:
- accept_scene = False
if message == "No ground goal options":
- feedback = (
- "The goal conditions are logically impossible (there is no solution). Check for a contradiction (e.g. asking for the floor to be stained and not stained at the same time).",
- )
+ accept_scene = False
+ feedback = {
+ "init_success": "no",
+ "goal_success": "no",
+ "init_feedback": "",
+ "goal_feedback": "The goal conditions are logically impossible (there is no solution). Check for a contradiction (e.g. asking for the floor to be stained and not stained at the same time).",
+ }
else:
- feedback = "Let Sanjana know there was an indeterminate assertion error during problem update."
+ accept_scene = False
+ feedback = {
+ "init_success": "no",
+ "goal_success": "no",
+ "init_feedback": f"Let Sanjana know there was an indeterminate assertion error during problem update.",
+ "goal_feedback": "",
+ }
return accept_scene, feedback
try:
- accept_scene, feedback = self.env.task.initialize(self.env)
+ accept_scene, feedback = self.task.check_scene()
except AssertionError as message:
- accept_scene = False
- feedback = f"Let Sanjana know there was an assertion error during scene checking/sampling: {str(message)}"
+ if "Invalid" in str(message):
+ accept_scene = False
+ feedback = {
+ "init_success": "no",
+ "goal_success": "no",
+ "init_feedback": f"We do not currently support {str(message).split(' ')[3]}. Please try a different object!",
+ "goal_feedback": "",
+ }
+ else:
+ accept_scene = False
+ feedback = {
+ "init_success": "no",
+ "goal_success": "no",
+ "init_feedback": f"Let Sanjana know there was an indeterminate assertion error during scene checking.",
+ "goal_feedback": "",
+ }
+ return accept_scene, feedback
+
+ if not accept_scene:
+ self.restore_scene()
+ return accept_scene, feedback
+
+ accept_scene, feedback = self.task.sample(kinematic_only=True)
+ if not accept_scene:
self.restore_scene()
return accept_scene, feedback
self.restore_scene()
+
+ # self.last_active_time = time.time()
return accept_scene, feedback
def close(self):
- self.env.close()
+ self.task.simulator.disconnect()
class iGFlask(Flask):
@@ -406,7 +456,14 @@ def check_sampling():
success, feedback = app.envs[new_unique_id].sample(behavior_activity, bddl)
if success:
num_successful_scenes += 1
- feedback_instances.append(feedback)
+ """
+ init_success, goal_success: one of the three values ['yes', 'no', 'untested']
+ init_feedback, goal_feedback: feedback for the initial and goal conditions. They are empty strings
+ if the conditions are untested or were sampled successfully.
+ """
+ feedback_instances.append(
+ (feedback["init_success"], feedback["goal_success"], feedback["init_feedback"], feedback["goal_feedback"])
+ )
success = num_successful_scenes >= min(NUM_REQUIRED_SUCCESSFUL_SCENES, len(ids))
full_feedback = feedback_instances
diff --git a/igibson/examples/web_ui/static/Beechwood_0.gif b/igibson/examples/demo/web_ui/static/Beechwood_0.gif
similarity index 100%
rename from igibson/examples/web_ui/static/Beechwood_0.gif
rename to igibson/examples/demo/web_ui/static/Beechwood_0.gif
diff --git a/igibson/examples/web_ui/static/Merom_1.gif b/igibson/examples/demo/web_ui/static/Merom_1.gif
similarity index 100%
rename from igibson/examples/web_ui/static/Merom_1.gif
rename to igibson/examples/demo/web_ui/static/Merom_1.gif
diff --git a/igibson/examples/web_ui/static/Rs.gif b/igibson/examples/demo/web_ui/static/Rs.gif
similarity index 100%
rename from igibson/examples/web_ui/static/Rs.gif
rename to igibson/examples/demo/web_ui/static/Rs.gif
diff --git a/igibson/examples/web_ui/static/fetch.jpg b/igibson/examples/demo/web_ui/static/fetch.jpg
similarity index 100%
rename from igibson/examples/web_ui/static/fetch.jpg
rename to igibson/examples/demo/web_ui/static/fetch.jpg
diff --git a/igibson/examples/web_ui/static/igibson_logo.png b/igibson/examples/demo/web_ui/static/igibson_logo.png
similarity index 100%
rename from igibson/examples/web_ui/static/igibson_logo.png
rename to igibson/examples/demo/web_ui/static/igibson_logo.png
diff --git a/igibson/examples/web_ui/static/turtlebot.jpg b/igibson/examples/demo/web_ui/static/turtlebot.jpg
similarity index 100%
rename from igibson/examples/web_ui/static/turtlebot.jpg
rename to igibson/examples/demo/web_ui/static/turtlebot.jpg
diff --git a/igibson/examples/web_ui/templates/demo.html b/igibson/examples/demo/web_ui/templates/demo.html
similarity index 100%
rename from igibson/examples/web_ui/templates/demo.html
rename to igibson/examples/demo/web_ui/templates/demo.html
diff --git a/igibson/examples/web_ui/templates/finished.jpg b/igibson/examples/demo/web_ui/templates/finished.jpg
similarity index 100%
rename from igibson/examples/web_ui/templates/finished.jpg
rename to igibson/examples/demo/web_ui/templates/finished.jpg
diff --git a/igibson/examples/web_ui/templates/index.html b/igibson/examples/demo/web_ui/templates/index.html
similarity index 100%
rename from igibson/examples/web_ui/templates/index.html
rename to igibson/examples/demo/web_ui/templates/index.html
diff --git a/igibson/examples/web_ui/templates/loading.jpg b/igibson/examples/demo/web_ui/templates/loading.jpg
similarity index 100%
rename from igibson/examples/web_ui/templates/loading.jpg
rename to igibson/examples/demo/web_ui/templates/loading.jpg
diff --git a/igibson/examples/web_ui/templates/sampling_index.html b/igibson/examples/demo/web_ui/templates/sampling_index.html
similarity index 100%
rename from igibson/examples/web_ui/templates/sampling_index.html
rename to igibson/examples/demo/web_ui/templates/sampling_index.html
diff --git a/igibson/examples/web_ui/templates/waiting.jpg b/igibson/examples/demo/web_ui/templates/waiting.jpg
similarity index 100%
rename from igibson/examples/web_ui/templates/waiting.jpg
rename to igibson/examples/demo/web_ui/templates/waiting.jpg
diff --git a/igibson/examples/web_ui/web_ui.py b/igibson/examples/demo/web_ui/web_ui.py
similarity index 98%
rename from igibson/examples/web_ui/web_ui.py
rename to igibson/examples/demo/web_ui/web_ui.py
index 6d1d1fe5d..7e06416eb 100644
--- a/igibson/examples/web_ui/web_ui.py
+++ b/igibson/examples/demo/web_ui/web_ui.py
@@ -16,8 +16,8 @@
import igibson
from igibson.objects.ycb_object import YCBObject
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.robots.fetch import Fetch
-from igibson.robots.turtlebot import Turtlebot
+from igibson.robots.fetch_robot import Fetch
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
@@ -203,6 +203,7 @@ def __init__(self):
self.s = Simulator(mode="headless", image_width=400, image_height=400, rendering_settings=settings)
scene = StaticIndoorScene("Rs")
self.s.import_scene(scene)
+ # self.s.import_ig_scene(scene)
self.robot = Turtlebot(config)
self.s.import_robot(self.robot)
@@ -254,7 +255,7 @@ def __init__(self, robot="turtlebot", scene="Rs_int"):
)
self.s = Simulator(mode="headless", image_width=400, image_height=400, rendering_settings=settings)
- self.s.import_scene(scene)
+ self.s.import_ig_scene(scene)
if robot == "turtlebot":
self.robot = Turtlebot(config)
diff --git a/igibson/examples/environments/config_selector.py b/igibson/examples/environments/config_selector.py
deleted file mode 100644
index ee1f5da78..000000000
--- a/igibson/examples/environments/config_selector.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import logging
-import os
-from random import random
-from sys import platform
-
-import yaml
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.render.profiler import Profiler
-from igibson.utils.assets_utils import folder_is_hidden, get_available_ig_scenes
-from igibson.utils.utils import let_user_pick
-
-
-def main(random_selection=False):
- """
- Prompts the user to select any available interactive scene and loads a turtlebot into it.
- It steps the environment 100 times with random actions sampled from the action space,
- using the Gym interface, resetting it 10 times.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
-
- ig_config_path = igibson.example_config_path
- available_configs = sorted(
- [f for f in os.listdir(ig_config_path) if (not folder_is_hidden(f) and f != "background")]
- )
- config_id = available_configs[let_user_pick(available_configs, random_selection=random_selection) - 1]
- config_filename = os.path.join(igibson.example_config_path, config_id)
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- config_data["image_width"] = 512
- config_data["image_height"] = 512
- config_data["vertical_fov"] = 60
- # config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building
- env = iGibsonEnv(config_file=config_data, mode="gui_interactive")
- for j in range(10):
- logging.info("Resetting environment")
- env.reset()
- for i in range(100):
- with Profiler("Environment action step"):
- action = env.action_space.sample() * 0.05
- state, reward, done, info = env.step(action)
- if done:
- logging.info("Episode finished after {} timesteps".format(i + 1))
- break
- env.close()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/environments/env_int_example.py b/igibson/examples/environments/env_int_example.py
deleted file mode 100644
index 164479964..000000000
--- a/igibson/examples/environments/env_int_example.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-import os
-from sys import platform
-
-import yaml
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.render.profiler import Profiler
-from igibson.utils.assets_utils import download_assets
-
-
-def main():
- """
- Creates an iGibson environment from a config file with a turtlebot in Rs_int (interactive).
- It steps the environment 100 times with random actions sampled from the action space,
- using the Gym interface, resetting it 10 times.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- # If they have not been downloaded before, download assets
- download_assets()
- config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml")
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- # config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building
- env = iGibsonEnv(config_file=config_data, mode="gui_interactive")
- for j in range(10):
- logging.info("Resetting environment")
- env.reset()
- for i in range(100):
- with Profiler("Environment action step"):
- action = env.action_space.sample()
- state, reward, done, info = env.step(action)
- if done:
- logging.info("Episode finished after {} timesteps".format(i + 1))
- break
- env.close()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/environments/env_int_selector.py b/igibson/examples/environments/env_int_selector.py
deleted file mode 100644
index c407eaa7c..000000000
--- a/igibson/examples/environments/env_int_selector.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import logging
-import os
-from random import random
-from sys import platform
-
-import yaml
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.render.profiler import Profiler
-from igibson.utils.assets_utils import get_available_ig_scenes
-from igibson.utils.utils import let_user_pick
-
-
-def main(random_selection=False):
- """
- Prompts the user to select any available interactive scene and loads a turtlebot into it.
- It steps the environment 100 times with random actions sampled from the action space,
- using the Gym interface, resetting it 10 times.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml")
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- # config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building
- available_ig_scenes = get_available_ig_scenes()
- scene_id = available_ig_scenes[let_user_pick(available_ig_scenes, random_selection=random_selection) - 1]
- env = iGibsonEnv(config_file=config_data, scene_id=scene_id, mode="gui_interactive")
- for j in range(10):
- logging.info("Resetting environment")
- env.reset()
- for i in range(100):
- with Profiler("Environment action step"):
- action = env.action_space.sample()
- state, reward, done, info = env.step(action)
- if done:
- logging.info("Episode finished after {} timesteps".format(i + 1))
- break
- env.close()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/environments/env_nonint_example.py b/igibson/examples/environments/env_nonint_example.py
deleted file mode 100644
index de27dd96c..000000000
--- a/igibson/examples/environments/env_nonint_example.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-import os
-from sys import platform
-
-import yaml
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.render.profiler import Profiler
-from igibson.utils.assets_utils import download_assets, download_demo_data
-
-
-def main():
- """
- Creates an iGibson environment from a config file with a turtlebot in Rs (not interactive).
- It steps the environment 100 times with random actions sampled from the action space,
- using the Gym interface, resetting it 10 times.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- # If they have not been downloaded before, download assets and Rs Gibson (non-interactive) models
- download_assets()
- download_demo_data()
- config_filename = os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- env = iGibsonEnv(config_file=config_data, mode="gui_interactive")
- for j in range(10):
- logging.info("Resetting environment")
- env.reset()
- for i in range(100):
- with Profiler("Environment action step"):
- action = env.action_space.sample()
- state, reward, done, info = env.step(action)
- if done:
- logging.info("Episode finished after {} timesteps".format(i + 1))
- break
- env.close()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/environments/env_nonint_selector.py b/igibson/examples/environments/env_nonint_selector.py
deleted file mode 100644
index 99fdabf22..000000000
--- a/igibson/examples/environments/env_nonint_selector.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import logging
-import os
-import random
-from sys import platform
-
-import yaml
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.render.profiler import Profiler
-from igibson.utils.assets_utils import get_available_g_scenes
-from igibson.utils.utils import let_user_pick
-
-
-def main(random_selection=False):
- """
- Prompts the user to select any available non-interactive scene and loads a turtlebot into it.
- It steps the environment 100 times with random actions sampled from the action space,
- using the Gym interface, resetting it 10 times.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- config_filename = os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- available_g_scenes = get_available_g_scenes()
- scene_id = available_g_scenes[let_user_pick(available_g_scenes, random_selection=random_selection) - 1]
- env = iGibsonEnv(config_file=config_filename, scene_id=scene_id, mode="gui_interactive")
- for j in range(10):
- logging.info("Resetting environment")
- env.reset()
- for i in range(100):
- with Profiler("Environment action step"):
- action = env.action_space.sample()
- state, reward, done, info = env.step(action)
- if done:
- logging.info("Episode finished after {} timesteps".format(i + 1))
- break
- env.close()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/example_selector.py b/igibson/examples/example_selector.py
deleted file mode 100644
index a832c9bcb..000000000
--- a/igibson/examples/example_selector.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import importlib
-import os
-import pkgutil
-import signal
-import string
-from multiprocessing import Process
-
-import igibson.examples as examples
-from igibson.utils.utils import let_user_pick
-
-TIMEOUT = 3
-
-
-def interrupted(signum, frame):
- raise ValueError("Time-up for keyboard input")
-
-
-signal.signal(signal.SIGALRM, interrupted)
-
-
-def timed_input():
- try:
- foo = input("Press ENTER to execute or 's'+ENTER to skip (or wait 3 secs to execute)\n")
- return foo == "s"
- except ValueError:
- # timeout
- return
-
-
-def main():
- """
-    Launcher for the iGibson examples: lists the available examples and lets the user
-    run one of them, print its description, or execute all of them in sequence,
-    each in a separate process.
- """
- examples_list = ["help", "all"]
- for kk in pkgutil.walk_packages(examples.__path__, examples.__name__ + "."):
- if not kk.ispkg and kk.name[17:] != "example_selector":
- examples_list += [kk.name[17:]]
-
- selected_demo = 0
- logo = (
- " _ _____ _ _" + "\n" + "(_) / ____|(_)| |" + "\n"
- " _ | | __ _ | |__ ___ ___ _ __" + "\n"
- "| || | |_ || || '_ \ / __| / _ \ | '_ \\" + "\n"
- "| || |__| || || |_) |\__ \| (_) || | | |" + "\n"
- "|_| \_____||_||_.__/ |___/ \___/ |_| |_|" + "\n"
- )
- while selected_demo == 0:
- print(logo)
- print("Select a demo/example, 'help' for information about a specific demo, or 'all' to run all demos:")
- selected_demo = let_user_pick(examples_list, print_intro=False) - 1
- if selected_demo == 0:
- help_demo = int(input("\nProvide the number of the example you need information for: ")) - 1
- if help_demo == 0:
- print("Print the description of a demo/example")
- elif help_demo == 1:
- print("Execute all demos/examples in order")
- else:
- module_help = importlib.import_module("igibson.examples." + examples_list[help_demo])
- print(module_help.main.__doc__)
- input("Press enter")
- elif selected_demo == 1:
- print("Executing all demos")
- for idx in range(2, len(examples_list)):
- print("*" * 80)
- print("*" * 80)
- print(logo)
- print("*" * 80)
- print("*" * 80)
- signal.alarm(TIMEOUT)
- s = timed_input()
- # disable the alarm after success
- signal.alarm(0)
- if s:
- continue
- print("Executing " + examples_list[idx])
-
- i = importlib.import_module("igibson.examples." + examples_list[idx])
- if "selector" in examples_list[idx]:
-                p = Process(target=i.main, kwargs={"random_selection": True})
- else:
- p = Process(target=i.main)
- p.start()
- p.join()
- print("Ended " + examples_list[idx])
- else:
- print("Executing " + examples_list[selected_demo])
- i = importlib.import_module("igibson.examples." + examples_list[selected_demo])
- i.main()
-
-
-if __name__ == "__main__":
- main()
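
The deleted example_selector.py combined two generic patterns: a keyboard prompt with a timeout (via `SIGALRM`) and running each example in a child process so a crash cannot take down the launcher. A standalone sketch of both patterns, independent of iGibson and POSIX-only because of `signal.SIGALRM` (helper names are illustrative):

```python
import signal
from multiprocessing import Process

TIMEOUT = 3  # seconds to wait for keyboard input


def _on_timeout(signum, frame):
    # Raising here interrupts the blocking input() call below.
    raise TimeoutError("No keyboard input received in time")


def confirm_or_skip(prompt="Press ENTER to run or 's'+ENTER to skip: "):
    """Return True if the user asked to skip; default to running on timeout."""
    signal.signal(signal.SIGALRM, _on_timeout)
    signal.alarm(TIMEOUT)
    try:
        return input(prompt).strip().lower() == "s"
    except TimeoutError:
        return False
    finally:
        signal.alarm(0)  # always cancel any pending alarm


def run_isolated(fn, *args, **kwargs):
    """Run fn in a child process so an example crash cannot kill the launcher."""
    p = Process(target=fn, args=args, kwargs=kwargs)
    p.start()
    p.join()
    return p.exitcode
```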
diff --git a/igibson/examples/scenes/scenescan2igibson/README.md b/igibson/examples/gen_data/README.md
similarity index 100%
rename from igibson/examples/scenes/scenescan2igibson/README.md
rename to igibson/examples/gen_data/README.md
diff --git a/igibson/examples/scenes/scenescan2igibson/combine_texture.ipynb b/igibson/examples/gen_data/combine_texture.ipynb
similarity index 100%
rename from igibson/examples/scenes/scenescan2igibson/combine_texture.ipynb
rename to igibson/examples/gen_data/combine_texture.ipynb
diff --git a/igibson/examples/scenes/scenescan2igibson/combine_texture.py b/igibson/examples/gen_data/combine_texture.py
similarity index 100%
rename from igibson/examples/scenes/scenescan2igibson/combine_texture.py
rename to igibson/examples/gen_data/combine_texture.py
diff --git a/igibson/examples/scenes/scenescan2igibson/generate_floor_map.py b/igibson/examples/gen_data/generate_floor_map.py
similarity index 100%
rename from igibson/examples/scenes/scenescan2igibson/generate_floor_map.py
rename to igibson/examples/gen_data/generate_floor_map.py
diff --git a/igibson/examples/scenes/scenescan2igibson/generate_traversable_map.py b/igibson/examples/gen_data/generate_traversable_map.py
similarity index 100%
rename from igibson/examples/scenes/scenescan2igibson/generate_traversable_map.py
rename to igibson/examples/gen_data/generate_traversable_map.py
diff --git a/igibson/examples/learning/demo_collection_example.py b/igibson/examples/learning/demo_collection_example.py
deleted file mode 100644
index df97e5b06..000000000
--- a/igibson/examples/learning/demo_collection_example.py
+++ /dev/null
@@ -1,204 +0,0 @@
-import argparse
-import datetime
-import logging
-import os
-import sys
-import tempfile
-
-import numpy as np
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
-from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher
-from igibson.utils.ig_logging import IGLogWriter
-from igibson.utils.utils import parse_config
-
-
-def main():
- """
- Example of how to save a demo of a task
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- args = parse_args()
- collect_demo(
- args.scene,
- args.task,
- args.task_id,
- args.instance_id,
- args.log_path,
- args.disable_save,
- args.disable_scene_cache,
- args.profile,
- args.config,
- )
-
-
-def parse_args():
- scene_choices = [
- "Beechwood_0_int",
- "Beechwood_1_int",
- "Benevolence_0_int",
- "Benevolence_1_int",
- "Benevolence_2_int",
- "Ihlen_0_int",
- "Ihlen_1_int",
- "Merom_0_int",
- "Merom_1_int",
- "Pomaria_0_int",
- "Pomaria_1_int",
- "Pomaria_2_int",
- "Rs_int",
- "Wainscott_0_int",
- "Wainscott_1_int",
- ]
-
- task_id_choices = [0, 1]
- parser = argparse.ArgumentParser(description="Run and collect a demo of a task")
- parser.add_argument(
- "--scene",
- type=str,
- choices=scene_choices,
- default="Rs_int",
- nargs="?",
- help="Scene name/ID matching iGibson interactive scenes.",
- )
- parser.add_argument(
- "--task",
- type=str,
- required=False,
- default="cleaning_out_drawers",
- nargs="?",
- help="Name of task to collect a demo of. If it a BEHAVIOR activity, the name should be one of the official"
- " activity labels, matching one of the folders in the BDDL repository.",
- )
- parser.add_argument(
- "--task_id",
- type=int,
- required=False,
- default=0,
- choices=task_id_choices,
- nargs="?",
- help="[Only for BEHAVIOR activities] Integer ID identifying the BDDL definition of the BEHAVIOR activity. "
- "Since we only provide two definitions per activity, the ID should be 0 or 1.",
- )
- parser.add_argument(
- "--instance_id",
- type=int,
- required=False,
- default=0,
- help="[Only for BEHAVIOR activities] Instance of BEHAVIOR activity (particular URDF corresponding to "
- "an instantiation of a BDDL activity definition)",
- )
- demo_file = os.path.join(tempfile.gettempdir(), "demo.hdf5")
- parser.add_argument("--log_path", type=str, default=demo_file, help="Path (and filename) of log file")
- parser.add_argument("--disable_save", action="store_true", help="Whether to disable saving logfiles.")
- parser.add_argument(
- "--disable_scene_cache", action="store_true", help="Whether to disable using pre-initialized scene caches."
- )
- parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.")
- parser.add_argument(
- "--config",
- help="which config file to use [default: use yaml files in examples/configs]",
- default=os.path.join(igibson.example_config_path, "behavior_vr.yaml"),
- )
- return parser.parse_args()
-
-
-def collect_demo(
- scene,
- task,
- task_id=0,
- instance_id=0,
- log_path=None,
- disable_save=False,
- disable_scene_cache=False,
- profile=False,
- config_file=os.path.join(igibson.example_config_path, "behavior_vr.yaml"),
-):
- """ """
- # HDR files for PBR rendering
- hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
- hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
- light_modulation_map_filename = os.path.join(
- igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
- )
- background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
-
- # Rendering settings
- rendering_settings = MeshRendererSettings(
- optimized=True,
- fullscreen=False,
- env_texture_filename=hdr_texture,
- env_texture_filename2=hdr_texture2,
- env_texture_filename3=background_texture,
- light_modulation_map_filename=light_modulation_map_filename,
- enable_shadow=True,
- enable_pbr=True,
- msaa=False,
- light_dimming_factor=1.0,
- )
-
- config = parse_config(config_file)
- config["task"] = task
- config["task_id"] = task_id
- config["scene_id"] = scene
- config["instance_id"] = instance_id
- config["online_sampling"] = disable_scene_cache
- config["load_clutter"] = True
- env = iGibsonEnv(
- config_file=config,
- mode="headless",
- action_timestep=1 / 30.0,
- physics_timestep=1 / 300.0,
- rendering_settings=rendering_settings,
- )
- env.reset()
- robot = env.robots[0]
-
- log_writer = None
- if not disable_save:
- timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
- if log_path is None:
- log_path = "{}_{}_{}_{}_{}.hdf5".format(task, task_id, scene, instance_id, timestamp)
- log_writer = IGLogWriter(
- env.simulator,
- log_filepath=log_path,
- task=env.task,
- store_vr=False,
- vr_robot=robot,
- profiling_mode=profile,
- filter_objects=True,
- )
- log_writer.set_up_data_storage()
- log_writer.hf.attrs["/metadata/instance_id"] = instance_id
-
- steps = 0
-
- # Main recording loop
- while True:
- if robot.__class__.__name__ == "BehaviorRobot" and steps < 2:
- # Use the first 2 steps to activate BehaviorRobot
- action = np.zeros((28,))
- action[19] = 1
- action[27] = 1
- else:
- action = np.random.uniform(-0.01, 0.01, size=(robot.action_dim,))
-
- # Execute the action
- state, reward, done, info = env.step(action)
-
- if log_writer and not disable_save:
- log_writer.process_frame()
-
- if done:
- break
-
- if log_writer and not disable_save:
- log_writer.end_log_session()
-
- env.close()
-
-
-if __name__ == "__main__":
- main()
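
The recording logic in the deleted demo_collection_example.py reduces to a small pattern around `IGLogWriter`. Below is a hedged sketch of just that part; it assumes an already-constructed `iGibsonEnv` named `env` and uses only the writer calls that appear above (`record_random_demo` is a hypothetical helper name):

```python
from igibson.utils.ig_logging import IGLogWriter


def record_random_demo(env, log_path, num_steps=200):
    """Roll out random actions in `env` and save the trajectory to `log_path` (HDF5)."""
    env.reset()
    robot = env.robots[0]
    writer = IGLogWriter(
        env.simulator,
        log_filepath=log_path,
        task=env.task,
        store_vr=False,
        vr_robot=robot,
        filter_objects=True,
    )
    writer.set_up_data_storage()
    for _ in range(num_steps):
        # Step the environment, then append the resulting simulator state to the log.
        state, reward, done, info = env.step(env.action_space.sample())
        writer.process_frame()
        if done:
            break
    writer.end_log_session()
```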
diff --git a/igibson/examples/object_states/cleaning_demo.py b/igibson/examples/object_states/cleaning_demo.py
deleted file mode 100644
index e0e882188..000000000
--- a/igibson/examples/object_states/cleaning_demo.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import logging
-
-from igibson import object_states
-from igibson.objects.ycb_object import YCBObject
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-
-
-def main():
- """
- Demo of a cleaning task
- Loads an interactive scene and sets all object surface to be dirty
- Loads also a cleaning tool that can be soaked in water and used to clean objects if moved manually
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- s = Simulator(mode="gui_interactive", image_width=1280, image_height=720, device_idx=0)
- # Set a better viewing direction
- s.viewer.initial_pos = [-0.7, 2.4, 1.1]
- s.viewer.initial_view_direction = [0, 0.9, -0.5]
- s.viewer.reset_viewer()
- # Only load a few objects
- load_object_categories = ["breakfast_table", "bottom_cabinet", "sink", "stove", "fridge", "window"]
- scene = InteractiveIndoorScene(
- "Rs_int", texture_randomization=False, object_randomization=False, load_object_categories=load_object_categories
- )
- s.import_scene(scene)
-
- # Load a cleaning tool
- block = YCBObject(name="036_wood_block", abilities={"soakable": {}, "cleaningTool": {}})
- s.import_object(block)
- block.set_position([-1.4, 3.0, 1.5])
-
- # Set everything that can go dirty and activate the water sources
- stateful_objects = set(
- scene.get_objects_with_state(object_states.Dusty)
- + scene.get_objects_with_state(object_states.Stained)
- + scene.get_objects_with_state(object_states.WaterSource)
- )
- for obj in stateful_objects:
- if object_states.Dusty in obj.states:
- logging.info("Setting object to be Dusty")
- obj.states[object_states.Dusty].set_value(True)
-
- if object_states.Stained in obj.states:
- logging.info("Setting object to be Stained")
- obj.states[object_states.Stained].set_value(True)
-
- if object_states.WaterSource in obj.states and object_states.ToggledOn in obj.states:
- logging.info("Setting water source object to be ToggledOn")
- obj.states[object_states.ToggledOn].set_value(True)
-
- try:
- while True:
- s.step()
- finally:
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
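
The cleaning demo relies on the object-state API: query objects by state with `scene.get_objects_with_state(...)` and read or write individual states through `obj.states[...]`. A minimal sketch of that usage, assuming a scene has already been imported into a simulator (helper names are illustrative):

```python
from igibson import object_states


def dirty_everything(scene):
    """Mark every object that supports Dusty/Stained as dirty."""
    for obj in scene.get_objects_with_state(object_states.Dusty):
        obj.states[object_states.Dusty].set_value(True)
    for obj in scene.get_objects_with_state(object_states.Stained):
        obj.states[object_states.Stained].set_value(True)


def count_dirty(scene):
    """Return (num_dusty, num_stained) objects that currently report being dirty."""
    dusty = sum(
        obj.states[object_states.Dusty].get_value()
        for obj in scene.get_objects_with_state(object_states.Dusty)
    )
    stained = sum(
        obj.states[object_states.Stained].get_value()
        for obj in scene.get_objects_with_state(object_states.Stained)
    )
    return dusty, stained
```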
diff --git a/igibson/examples/object_states/cleaning_demo_with_reset.py b/igibson/examples/object_states/cleaning_demo_with_reset.py
deleted file mode 100644
index 01e684096..000000000
--- a/igibson/examples/object_states/cleaning_demo_with_reset.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import logging
-import os
-
-import numpy as np
-import pybullet as p
-
-from igibson import object_states
-from igibson.objects.articulated_object import URDFObject
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_ig_model_path
-from igibson.utils.utils import restoreState
-
-
-def main():
- """
- Demo of a cleaning task that resets after everything has been cleaned
- To save/load state it combines pybullet save/load functionality and additional iG functions for the extended states
- Loads an empty scene with a sink, a dusty table and a dirty and stained bowl, and a cleaning tool
- If everything is cleaned, or after N steps, the scene resets to the initial state
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- s = Simulator(mode="gui_interactive", image_width=1280, image_height=720)
- # Set a better viewing direction
- s.viewer.initial_pos = [-0.5, -0.4, 1.5]
- s.viewer.initial_view_direction = [0.7, 0.1, -0.7]
- s.viewer.reset_viewer()
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- s.import_scene(scene)
-
- # Load sink ON
- model_path = os.path.join(get_ig_model_path("sink", "sink_1"), "sink_1.urdf")
- sink = URDFObject(
- filename=model_path,
- category="sink",
- name="sink_1",
- scale=np.array([0.8, 0.8, 0.8]),
- abilities={"toggleable": {}, "waterSource": {}},
- )
- s.import_object(sink)
- sink.set_position([1, 1, 0.8])
- assert sink.states[object_states.ToggledOn].set_value(True)
-
- # Load cleaning tool
- model_path = get_ig_model_path("scrub_brush", "scrub_brush_000")
- model_filename = os.path.join(model_path, "scrub_brush_000.urdf")
- max_bbox = [0.1, 0.1, 0.1]
- avg = {"size": max_bbox, "density": 67.0}
- brush = URDFObject(
- filename=model_filename,
- category="scrub_brush",
- name="scrub_brush",
- avg_obj_dims=avg,
- fit_avg_dim_volume=True,
- model_path=model_path,
- )
- s.import_object(brush)
- brush.set_position([0, -2, 0.4])
-
- # Load table with dust
- model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf")
- desk = URDFObject(
- filename=model_path,
- category="breakfast_table",
- name="19898",
- scale=np.array([0.8, 0.8, 0.8]),
- abilities={"dustyable": {}},
- )
- s.import_object(desk)
- desk.set_position([1, -2, 0.4])
- assert desk.states[object_states.Dusty].set_value(True)
-
- # Load a bowl with stains
- model_path = os.path.join(get_ig_model_path("bowl", "68_0"), "68_0.urdf")
- bowl = URDFObject(filename=model_path, category="bowl", name="bowl", abilities={"dustyable": {}, "stainable": {}})
- s.import_object(bowl)
- assert bowl.states[object_states.OnTop].set_value(desk, True, use_ray_casting_method=True)
- assert bowl.states[object_states.Stained].set_value(True)
-
- # Save the initial state.
- pb_initial_state = p.saveState() # Save pybullet state (kinematics)
- brush_initial_extended_state = brush.dump_state() # Save brush extended state
- logging.info(brush_initial_extended_state)
- desk_initial_extended_state = desk.dump_state() # Save desk extended state
- logging.info(desk_initial_extended_state)
- bowl_initial_extended_state = bowl.dump_state() # Save bowl extended state
- logging.info(bowl_initial_extended_state)
-
- # Main simulation loop.
- max_steps = 1000
- try:
- while True:
- # Keep stepping until table or bowl are clean, or we reach 1000 steps
- steps = 0
- while (
- desk.states[object_states.Dusty].get_value()
- and bowl.states[object_states.Stained].get_value()
- and steps != max_steps
- ):
- steps += 1
- s.step()
- logging.info("Step {}".format(steps))
-
- if not desk.states[object_states.Dusty].get_value():
- logging.info("Reset because Table cleaned")
- elif not bowl.states[object_states.Stained].get_value():
- logging.info("Reset because Bowl cleaned")
- else:
- logging.info("Reset because max steps")
-
- # Reset to the initial state
- restoreState(pb_initial_state)
- brush.load_state(brush_initial_extended_state)
- brush.force_wakeup()
- desk.load_state(desk_initial_extended_state)
- desk.force_wakeup()
- bowl.load_state(bowl_initial_extended_state)
- bowl.force_wakeup()
-
- finally:
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
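
The reset logic above combines pybullet's kinematic snapshot (`p.saveState` / `restoreState`) with iGibson's per-object extended state (`dump_state` / `load_state`). A small sketch of that save/restore pattern, assuming `objects` is a list of already-imported iGibson objects (helper names are illustrative):

```python
import pybullet as p

from igibson.utils.utils import restoreState


def snapshot(objects):
    """Capture the pybullet kinematic state plus each object's iGibson extended state."""
    pb_state_id = p.saveState()
    extended = {obj.name: obj.dump_state() for obj in objects}
    return pb_state_id, extended


def restore(snapshot_data, objects):
    """Restore a snapshot taken by snapshot(), waking objects so physics resumes cleanly."""
    pb_state_id, extended = snapshot_data
    restoreState(pb_state_id)
    for obj in objects:
        obj.load_state(extended[obj.name])
        obj.force_wakeup()
```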
diff --git a/igibson/examples/object_states/sliceable_demo_with_reset.py b/igibson/examples/object_states/sliceable_demo_with_reset.py
deleted file mode 100644
index fecfc32d4..000000000
--- a/igibson/examples/object_states/sliceable_demo_with_reset.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import logging
-import os
-
-import numpy as np
-import pybullet as p
-
-from igibson import object_states
-from igibson.objects.articulated_object import URDFObject
-from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_ig_model_path
-from igibson.utils.utils import restoreState
-
-
-def main():
- """
-    Demo of a slicing task that resets after the apple has been sliced
- To save/load state it combines pybullet save/load functionality and additional iG functions for the extended states
- Loads an empty scene with a desk and an apple. After a while, the apple gets sliced. Then, the scene resets
-    This demo also demonstrates how to manually create a grouped object with multiple URDF models, and a
- multiplexed object (two URDF models for the same object) with a model for the full object and a model
- of the grouped slices
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- s = Simulator(mode="gui_interactive", image_width=1280, image_height=720)
- # Set a better viewing direction
- s.viewer.initial_pos = [-0.3, -0.3, 1.1]
- s.viewer.initial_view_direction = [0.7, 0.6, -0.4]
- s.viewer.reset_viewer()
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- s.import_scene(scene)
-
- # Load a desk
- model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf")
- desk = URDFObject(
- filename=model_path, category="breakfast_table", name="19898", scale=np.array([0.8, 0.8, 0.8]), abilities={}
- )
- s.import_object(desk)
- desk.set_position([0, 0, 0.4])
-
-    # Create a URDF object of an apple, but do not load it into the simulator yet
- model_path = os.path.join(get_ig_model_path("apple", "00_0"), "00_0.urdf")
- whole_obj = URDFObject(
- model_path, name="00_0", category="apple", scale=np.array([1.0, 1.0, 1.0]), initial_pos=[100, 100, -100]
- )
-
- object_parts = []
- # Check the parts that compose the apple and create URDF objects of them
- for i, part in enumerate(whole_obj.metadata["object_parts"]):
- part_category = part["category"]
- part_model = part["model"]
- # Scale the offset accordingly
- part_pos = part["pos"] * whole_obj.scale
- part_orn = part["orn"]
- part_model_path = get_ig_model_path(part_category, part_model)
- part_filename = os.path.join(part_model_path, part_model + ".urdf")
- part_obj_name = whole_obj.name + "_part_{}".format(i)
- part_obj = URDFObject(
- part_filename,
- name=part_obj_name,
- category=part_category,
- model_path=part_model_path,
- scale=whole_obj.scale,
- initial_pos=[100 + i, 100, -100],
- )
- object_parts.append((part_obj, (part_pos, part_orn)))
-
- # Group the apple parts into a single grouped object
- grouped_parts_obj = ObjectGrouper(object_parts)
-
- # Create a multiplexed object: either the full apple, or the parts
- multiplexed_obj = ObjectMultiplexer(whole_obj.name + "_multiplexer", [whole_obj, grouped_parts_obj], 0)
-
- # Finally, load the multiplexed object
- s.import_object(multiplexed_obj)
- multiplexed_obj.set_position([0, 0, 0.72])
-
- # Let the apple get stable
- for _ in range(100):
- s.step()
-
- # Save the initial state.
- initial_state_pb = p.saveState()
- initial_state_multiplexed_obj = multiplexed_obj.dump_state()
- logging.info(multiplexed_obj)
-
- try:
- while True:
- logging.info("Stepping the simulator")
- for _ in range(100):
- s.step()
-
- logging.info("Slicing the apple and moving the parts")
- # Slice the apple and set the object parts away
- multiplexed_obj.states[object_states.Sliced].set_value(True)
-
- # Check that the multiplexed changed to the group of parts
- assert isinstance(multiplexed_obj.current_selection(), ObjectGrouper)
-
- # Move the parts
- for part_obj in multiplexed_obj.objects:
- part_obj.set_position([0, 0, 1])
-
- logging.info("Stepping the simulator")
- for _ in range(100):
- s.step()
-
- logging.info("Restoring the state")
- # Restore the state
- restoreState(initial_state_pb)
-
- # The apple should become whole again
- multiplexed_obj.load_state(initial_state_multiplexed_obj)
-
- finally:
- s.disconnect()
- p.removeState(initial_state_pb)
-
-
-if __name__ == "__main__":
- main()
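
The key step in the slicing demo is flipping the `Sliced` state on a multiplexed object, which switches its current selection from the whole model to the grouped part models. A one-function sketch of that step, assuming `multiplexed_obj` was built as in the deleted example above (the helper name is illustrative):

```python
from igibson import object_states
from igibson.objects.multi_object_wrappers import ObjectGrouper


def slice_object(multiplexed_obj):
    """Slice a multiplexed object; return True if it switched to its grouped parts."""
    multiplexed_obj.states[object_states.Sliced].set_value(True)
    return isinstance(multiplexed_obj.current_selection(), ObjectGrouper)
```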
diff --git a/igibson/examples/objects/__init__.py b/igibson/examples/objects/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/igibson/examples/objects/draw_bounding_box.py b/igibson/examples/objects/draw_bounding_box.py
deleted file mode 100644
index 50f85f1a3..000000000
--- a/igibson/examples/objects/draw_bounding_box.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import itertools
-import logging
-import os
-
-import numpy as np
-import pybullet as p
-import trimesh
-from scipy.spatial.transform import Rotation
-
-import igibson
-from igibson.objects.articulated_object import URDFObject
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.simulator import Simulator
-from igibson.utils import utils
-
-
-def main():
- """
- Shows how to obtain the bounding box of an articulated object.
- Draws the bounding box around the loaded object, a cabinet, while it moves.
- Visible only in the pybullet GUI.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- s = Simulator(mode="headless", use_pb_gui=True)
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- s.import_scene(scene)
-
- # Banana is a single-link object and Door is a multi-link object.
- banana_dir = os.path.join(igibson.ig_dataset_path, "objects/banana/09_0")
- banana_filename = os.path.join(banana_dir, "09_0.urdf")
- door_dir = os.path.join(igibson.ig_dataset_path, "objects/door/8930")
- door_filename = os.path.join(door_dir, "8930.urdf")
-
- banana = URDFObject(
- filename=banana_filename, category="banana", scale=np.array([3.0, 5.0, 2.0]), merge_fixed_links=False
- )
- door = URDFObject(filename=door_filename, category="door", scale=np.array([1.0, 2.0, 3.0]), merge_fixed_links=False)
- s.import_object(banana)
- s.import_object(door)
- banana.set_position_orientation([2, 0, 0.75], [0, 0, 0, 1])
- door.set_position_orientation([-2, 0, 2], Rotation.from_euler("XYZ", [0, 0, -np.pi / 4]).as_quat())
-
- # Main simulation loop
- try:
- first_step = True
- while True:
- # Step simulation.
- s.step()
-
- line_idx = 0
- for obj in [banana, door]:
- # Draw new debug lines for the bounding boxes.
- bbox_center, bbox_orn, bbox_bf_extent, bbox_wf_extent = obj.get_base_aligned_bounding_box(visual=True)
- bbox_frame_vertex_positions = np.array(list(itertools.product((1, -1), repeat=3))) * (
- bbox_bf_extent / 2
- )
- bbox_transform = utils.quat_pos_to_mat(bbox_center, bbox_orn)
- world_frame_vertex_positions = trimesh.transformations.transform_points(
- bbox_frame_vertex_positions, bbox_transform
- )
- for i, from_vertex in enumerate(world_frame_vertex_positions):
- for j, to_vertex in enumerate(world_frame_vertex_positions):
- if j <= i:
- assert (
- p.addUserDebugLine(
- from_vertex,
- to_vertex,
- lineColorRGB=[1.0, 0.0, 0.0],
- lineWidth=1,
- lifeTime=0,
- replaceItemUniqueId=-1 if first_step else line_idx,
- )
- == line_idx
- )
- line_idx += 1
- first_step = False
- finally:
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
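
The bounding-box drawing above boils down to one geometric step: take the base-aligned box returned by `get_base_aligned_bounding_box`, enumerate its eight corners in the box frame, and transform them into the world frame. A sketch of just that step, using only the calls that appear in the deleted code (the helper name is illustrative):

```python
import itertools

import numpy as np
import trimesh

from igibson.utils import utils


def world_frame_bbox_corners(obj):
    """Return the 8 world-frame corner points of an object's base-aligned bounding box."""
    center, orn, bf_extent, _ = obj.get_base_aligned_bounding_box(visual=True)
    # All +/- half-extent combinations give the corners in the bounding-box frame.
    corners_bf = np.array(list(itertools.product((1, -1), repeat=3))) * (bf_extent / 2)
    # 4x4 homogeneous transform from the bounding-box frame to the world frame.
    transform = utils.quat_pos_to_mat(center, orn)
    return trimesh.transformations.transform_points(corners_bf, transform)
```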
diff --git a/igibson/examples/objects/highlight_objects.py b/igibson/examples/objects/highlight_objects.py
deleted file mode 100644
index d792274dc..000000000
--- a/igibson/examples/objects/highlight_objects.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""
-    Highlight and un-highlight all instances of a given object category (windows) via the renderer
-"""
-import logging
-
-import numpy as np
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-
-
-def main():
- """
-    Visually highlights all object instances of a given category and then removes the highlighting
- It also demonstrates how to apply an action on all instances of objects of a given category
- ONLY WORKS WITH OPTIMIZED RENDERING (not on Mac)
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- settings = MeshRendererSettings(optimized=True, enable_shadow=True, blend_highlight=True)
- s = Simulator(
- mode="gui_interactive",
- image_width=512,
- image_height=512,
- rendering_settings=settings,
- )
- scene = InteractiveIndoorScene(
- "Rs_int", texture_randomization=False, load_object_categories=["window"], object_randomization=False
- )
- s.import_scene(scene)
-
- i = 0
- while True:
- s.step()
- if i % 100 == 0:
- logging.info("Highlighting windows")
- for obj in scene.objects_by_category["window"]:
- obj.highlight()
-
- if i % 100 == 50:
- logging.info("Deactivating the highlight on windows")
- for obj in scene.objects_by_category["window"]:
- obj.unhighlight()
-
- i += 1
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/objects/load_object_selector.py b/igibson/examples/objects/load_object_selector.py
deleted file mode 100644
index f3ed65eb6..000000000
--- a/igibson/examples/objects/load_object_selector.py
+++ /dev/null
@@ -1,144 +0,0 @@
-import logging
-import os
-from sys import platform
-
-import yaml
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.objects.articulated_object import URDFObject
-from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
-from igibson.render.profiler import Profiler
-from igibson.robots.turtlebot import Turtlebot
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import (
- get_all_object_categories,
- get_ig_avg_category_specs,
- get_ig_category_path,
- get_ig_model_path,
- get_object_models_of_category,
-)
-from igibson.utils.utils import let_user_pick, parse_config
-
-
-def main(random_selection=False):
- """
-    This demo shows how to load any scaled object from the iG object model dataset
-    in a predefined location
- The user selects an object model to load
- The objects can be loaded into an empty scene, an interactive scene (iG) or a static scene (Gibson)
- The example also shows how to use the Environment API or directly the Simulator API, loading objects and robots
- and executing actions
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- scene_options = ["Empty scene", "Interactive scene (iG)", "Static scene (Gibson)"]
- type_of_scene = let_user_pick(scene_options, random_selection=random_selection) - 1
-
- if type_of_scene == 0: # Empty
- config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml"))
- settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- # scene.load_object_categories(benchmark_names)
- s.import_scene(scene)
- robot_config = config["robot"]
- robot_config.pop("name")
- turtlebot = Turtlebot(**robot_config)
- s.import_robot(turtlebot)
-
- elif type_of_scene == 1: # iG
- config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml")
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building
- config_data["visible_target"] = False
- config_data["visible_path"] = False
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- env = iGibsonEnv(config_file=config_data, mode="gui_interactive")
- s = env.simulator
-
- elif type_of_scene == 2: # Gibson
- config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml"))
- settings = MeshRendererSettings(enable_shadow=False, msaa=False)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- settings.texture_scale = 0.5
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
-
- scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=False)
- s.import_scene(scene)
- robot_config = config["robot"]
- robot_config.pop("name")
- turtlebot = Turtlebot(**robot_config)
- s.import_robot(turtlebot)
-
- # Set a better viewing direction
- s.viewer.initial_pos = [-1.7, -0.9, 0.9]
- s.viewer.initial_view_direction = [0.9, 0, -0.3]
- s.viewer.reset_viewer()
-
- # Select a category to load
- available_obj_categories = get_all_object_categories()
- obj_category = available_obj_categories[
- let_user_pick(available_obj_categories, random_selection=random_selection) - 1
- ]
-
- # Select a model to load
- available_obj_models = get_object_models_of_category(obj_category)
- obj_model = available_obj_models[let_user_pick(available_obj_models, random_selection=random_selection) - 1]
-
- # Load the specs of the object categories, e.g., common scaling factor
- avg_category_spec = get_ig_avg_category_specs()
-
- try:
- # Create the full path combining the path for all models and the name of the model
- model_path = get_ig_model_path(obj_category, obj_model)
- filename = os.path.join(model_path, obj_model + ".urdf")
-
- # Create a unique name for the object instance
- obj_name = "{}_{}".format(obj_category, 0)
-
- # Create and import the object
- simulator_obj = URDFObject(
- filename,
- name=obj_name,
- category=obj_category,
- model_path=model_path,
- avg_obj_dims=avg_category_spec.get(obj_category),
- fit_avg_dim_volume=True,
- texture_randomization=False,
- overwrite_inertial=True,
- initial_pos=[0.5, -0.5, 1.01],
- )
- s.import_object(simulator_obj)
-
- if type_of_scene == 1:
- for j in range(10):
- logging.info("Resetting environment")
- env.reset()
- for i in range(100):
- with Profiler("Environment action step"):
- # action = env.action_space.sample()
- state, reward, done, info = env.step([0.1, 0.1])
- if done:
- logging.info("Episode finished after {} timesteps".format(i + 1))
- break
- else:
- for i in range(10000):
- with Profiler("Simulator step"):
- turtlebot.apply_action([0.1, 0.1])
- s.step()
- rgb = s.renderer.render_robot_cameras(modes=("rgb"))
-
- finally:
- if type_of_scene == 1:
- env.close()
- else:
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
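
Resolving a dataset object to its URDF file is the part of this selector most likely to be reused elsewhere. A small sketch built only from the `assets_utils` helpers imported above (`urdf_path_for` is a hypothetical helper name):

```python
import os

from igibson.utils.assets_utils import (
    get_all_object_categories,
    get_ig_model_path,
    get_object_models_of_category,
)


def urdf_path_for(category=None, model=None):
    """Resolve the URDF file and model directory for a dataset object.

    Defaults to the first available category and model when none are given.
    """
    if category is None:
        category = get_all_object_categories()[0]
    if model is None:
        model = get_object_models_of_category(category)[0]
    model_path = get_ig_model_path(category, model)
    return os.path.join(model_path, model + ".urdf"), model_path
```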
diff --git a/igibson/examples/objects/load_objects.py b/igibson/examples/objects/load_objects.py
deleted file mode 100644
index 114df22d0..000000000
--- a/igibson/examples/objects/load_objects.py
+++ /dev/null
@@ -1,168 +0,0 @@
-import logging
-import os
-from sys import platform
-
-import numpy as np
-import yaml
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.objects.articulated_object import URDFObject
-from igibson.objects.ycb_object import YCBObject
-from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
-from igibson.render.profiler import Profiler
-from igibson.robots.turtlebot import Turtlebot
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path
-from igibson.utils.utils import let_user_pick, parse_config
-
-
-def main():
- """
- This demo shows how to load scaled objects from the iG object model dataset and
- additional objects from the YCB dataset in predefined locations
-    Loads a specific table model, a random model of the same category, and 10 cracker boxes
- The objects can be loaded into an empty scene, an interactive scene (iG) or a static scene (Gibson)
- The example also shows how to use the Environment API or directly the Simulator API, loading objects and robots
- and executing actions
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- scene_options = ["Empty scene", "Interactive scene (iG)", "Static scene (Gibson)"]
- type_of_scene = let_user_pick(scene_options) - 1
-
- if type_of_scene == 0: # Empty
- config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml"))
- settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- # scene.load_object_categories(benchmark_names)
- s.import_scene(scene)
- robot_config = config["robot"]
- robot_config.pop("name")
- turtlebot = Turtlebot(**robot_config)
- s.import_robot(turtlebot)
-
- elif type_of_scene == 1: # iG
- config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml")
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building
- config_data["visible_target"] = False
- config_data["visible_path"] = False
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- env = iGibsonEnv(config_file=config_data, mode="gui_interactive")
- s = env.simulator
-
- elif type_of_scene == 2: # Gibson
- config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml"))
- settings = MeshRendererSettings(enable_shadow=False, msaa=False)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- settings.texture_scale = 0.5
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
-
- scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=False)
- s.import_scene(scene)
- robot_config = config["robot"]
- robot_config.pop("name")
- turtlebot = Turtlebot(**robot_config)
- s.import_robot(turtlebot)
-
- # Set a better viewing direction
- s.viewer.initial_pos = [-2, 1.4, 1.2]
- s.viewer.initial_view_direction = [0.6, -0.8, 0.1]
- s.viewer.reset_viewer()
-
- # Objects to load: two tables, the first one is predefined model, the second, random for the same category
- table_objects_to_load = {
- "table_1": {
- "category": "breakfast_table",
- "model": "1b4e6f9dd22a8c628ef9d976af675b86",
- "pos": (0.0, -0.2, 1.01),
- "orn": (0, 0, 90),
- },
- "table_2": {
- "category": "breakfast_table",
- "pos": (0.5, -2.0, 1.01),
- "orn": (0, 0, 45),
- },
- }
-
- # Load the specs of the object categories, e.g., common scaling factor
- avg_category_spec = get_ig_avg_category_specs()
-
- scene_objects = {}
- try:
- for obj in table_objects_to_load.values():
- category = obj["category"]
- if category in scene_objects:
- scene_objects[category] += 1
- else:
- scene_objects[category] = 1
-
- # Get the path for all models of this category
- category_path = get_ig_category_path(category)
-
- # If the specific model is given, we use it. If not, we select one randomly
- if "model" in obj:
- model = obj["model"]
- else:
- model = np.random.choice(os.listdir(category_path))
-
- # Create the full path combining the path for all models and the name of the model
- model_path = get_ig_model_path(category, model)
- filename = os.path.join(model_path, model + ".urdf")
-
- # Create a unique name for the object instance
- obj_name = "{}_{}".format(category, scene_objects[category])
-
- # Create and import the object
- simulator_obj = URDFObject(
- filename,
- name=obj_name,
- category=category,
- model_path=model_path,
- avg_obj_dims=avg_category_spec.get(category),
- fit_avg_dim_volume=True,
- texture_randomization=False,
- overwrite_inertial=True,
- initial_pos=obj["pos"],
- initial_orn=obj["orn"],
- )
- s.import_object(simulator_obj)
-
- for _ in range(10):
- obj = YCBObject("003_cracker_box")
- s.import_object(obj)
- obj.set_position_orientation(np.append(np.random.uniform(low=0, high=2, size=2), [1.8]), [0, 0, 0, 1])
-
- if type_of_scene == 1:
- for j in range(10):
- logging.info("Resetting environment")
- env.reset()
- for i in range(100):
- with Profiler("Environment action step"):
- # action = env.action_space.sample()
- state, reward, done, info = env.step([0.1, 0.1])
- if done:
- logging.info("Episode finished after {} timesteps".format(i + 1))
- break
- else:
- for i in range(10000):
- with Profiler("Simulator step"):
- turtlebot.apply_action([0.1, 0.1])
- s.step()
- rgb = s.renderer.render_robot_cameras(modes=("rgb"))
-
- finally:
- if type_of_scene == 1:
- env.close()
- else:
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/objects/object_temperature_texture.py b/igibson/examples/objects/object_temperature_texture.py
deleted file mode 100644
index 8e53d6172..000000000
--- a/igibson/examples/objects/object_temperature_texture.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import logging
-import os
-
-import igibson
-from igibson import object_states
-from igibson.objects.articulated_object import URDFObject
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.profiler import Profiler
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_ig_model_path
-from igibson.utils.utils import parse_config
-
-
-def main():
- """
- Demo of the texture change for an object
-    Loads an apple and manually increases its temperature to observe the texture change from frozen, to cooked, to burnt
- Also shows how to change object-specific parameters such as the burning temperature
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml"))
- settings = MeshRendererSettings(enable_shadow=True, msaa=False, optimized=True)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
-
- # Set a better viewing direction
- s.viewer.initial_pos = [0, 0.4, 0.2]
- s.viewer.initial_view_direction = [-0.4, -0.9, 0]
- s.viewer.reset_viewer()
-
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- s.import_scene(scene)
-
- # Load and place an apple
- apple_dir = get_ig_model_path("apple", "00_0")
- apple_urdf = os.path.join(get_ig_model_path("apple", "00_0"), "00_0.urdf")
- apple = URDFObject(apple_urdf, name="apple", category="apple", model_path=apple_dir)
- s.import_object(apple)
- apple.set_position([0, 0, 0.2])
- apple.states[object_states.Burnt].burn_temperature = 200
-
- # Manually increase the temperature of the apple
- for i in range(-10, 100):
- temp = i * 5
- logging.info("Apple temperature: {} degrees Celsius".format(temp))
- apple.states[object_states.Temperature].set_value(temp)
- logging.info("Frozen(Apple)? {}".format(apple.states[object_states.Frozen].get_value()))
- logging.info("Cooked(Apple)? {}".format(apple.states[object_states.Cooked].get_value()))
- logging.info("Burnt(Apple)? {}".format(apple.states[object_states.Burnt].get_value()))
- for j in range(10):
- # with Profiler("Simulator step"):
- s.step()
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
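
The temperature demo shows how the thermal states are derived: setting `Temperature` determines what `Frozen`, `Cooked` and `Burnt` report on the next queries. A compact sketch of that read/write pattern, assuming `obj` is an imported object exposing those states (the helper name is illustrative):

```python
from igibson import object_states


def report_thermal_states(obj, temperature):
    """Set an object's temperature and return the thermal states derived from it."""
    obj.states[object_states.Temperature].set_value(temperature)
    return {
        "frozen": obj.states[object_states.Frozen].get_value(),
        "cooked": obj.states[object_states.Cooked].get_value(),
        "burnt": obj.states[object_states.Burnt].get_value(),
    }
```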
diff --git a/igibson/examples/objects/object_viewer.py b/igibson/examples/objects/object_viewer.py
deleted file mode 100644
index fb36be264..000000000
--- a/igibson/examples/objects/object_viewer.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import logging
-import os
-
-import igibson
-from igibson.objects.articulated_object import URDFObject
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import (
- get_all_object_categories,
- get_ig_avg_category_specs,
- get_ig_model_path,
- get_object_models_of_category,
- get_scene_path,
-)
-from igibson.utils.utils import let_user_pick, parse_config
-
-
-def main():
- """
- Minimal example to visualize all the models available in the iG dataset
- It queries the user to select an object category and a model of that category, loads it and visualizes it
- No physical simulation
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
-
- settings = MeshRendererSettings(enable_shadow=True, msaa=False, optimized=True)
- s = Simulator(
- mode="gui_interactive", image_width=512, image_height=512, vertical_fov=70, rendering_settings=settings
- )
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- # scene.load_object_categories(benchmark_names)
- s.import_scene(scene)
- s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])
-
- # Select a category to load
- available_obj_categories = get_all_object_categories()
- obj_category = available_obj_categories[let_user_pick(available_obj_categories) - 1]
-
- # Select a model to load
- available_obj_models = get_object_models_of_category(obj_category)
- obj_model = available_obj_models[let_user_pick(available_obj_models) - 1]
-
- logging.info("Visualizing category {}, model {}".format(obj_category, obj_model))
-
- # Load the specs of the object categories, e.g., common scaling factor
- avg_category_spec = get_ig_avg_category_specs()
-
- try:
- # Create the full path combining the path for all models and the name of the model
- model_path = get_ig_model_path(obj_category, obj_model)
- filename = os.path.join(model_path, obj_model + ".urdf")
-
- # Create a unique name for the object instance
- obj_name = "{}_{}".format(obj_category, 0)
-
- # Create and import the object
- simulator_obj = URDFObject(
- filename,
- name=obj_name,
- category=obj_category,
- model_path=model_path,
- avg_obj_dims=avg_category_spec.get(obj_category),
- fit_avg_dim_volume=True,
- texture_randomization=False,
- overwrite_inertial=True,
- initial_pos=[0.5, -0.5, 1.01],
- )
- s.import_object(simulator_obj)
-
- # Set a better viewing direction
- s.viewer.initial_pos = [2.0, 0, 1.6]
- s.viewer.initial_view_direction = [-1, 0, 0]
- s.viewer.reset_viewer()
-
- # Visualize object
- while True:
- s.viewer.update()
-
- finally:
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/observations/__init__.py b/igibson/examples/observations/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/igibson/examples/observations/generate_additional_visual_channels.py b/igibson/examples/observations/generate_additional_visual_channels.py
deleted file mode 100644
index c451f2dc5..000000000
--- a/igibson/examples/observations/generate_additional_visual_channels.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import logging
-import os
-from sys import platform
-
-import cv2
-import numpy as np
-import yaml
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.render.profiler import Profiler
-from igibson.utils.constants import MAX_CLASS_COUNT, MAX_INSTANCE_COUNT
-from igibson.utils.vision_utils import randomize_colors, segmentation_to_rgb
-
-
-def main():
- """
- Example of rendering additional sensor modalities
-    Loads Rs_int (interactive) with some objects and renders depth, normals, and semantic and instance segmentation
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml")
- config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
- # Only load a few objects
- # config_data["load_object_categories"] = [
- # "breakfast_table",
- # "carpet",
- # "sofa",
- # "bottom_cabinet",
- # "sink",
- # "stove",
- # "fridge",
- # ]
- config_data["vertical_fov"] = 90
- config_data["output"] = ["task_obs", "rgb", "depth", "normal", "seg", "ins_seg"]
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- env = iGibsonEnv(config_file=config_data, mode="gui_interactive")
- # Set a better viewing direction
- env.simulator.viewer.initial_pos = [0, 1.1, 1.5]
- env.simulator.viewer.initial_view_direction = [0, 1, 0.1]
- env.simulator.viewer.reset_viewer()
-
- colors_ss = randomize_colors(MAX_CLASS_COUNT, bright=True)
- colors_is = randomize_colors(MAX_INSTANCE_COUNT, bright=True)
-
- for j in range(100):
- logging.info("Resetting environment")
- env.reset()
- for i in range(300):
- with Profiler("Environment action step"):
- action = env.action_space.sample()
- state, reward, done, info = env.step([0.1, 0.1])
-
- depth = state["depth"]
- cv2.imshow("Depth", depth)
-
- normal = state["normal"]
- cv2.imshow("Normals", cv2.cvtColor(normal, cv2.COLOR_RGB2BGR))
-
- seg = state["seg"]
- seg_int = np.round(seg[:, :, 0]).astype(np.int32)
- seg_cv = segmentation_to_rgb(seg_int, MAX_CLASS_COUNT, colors=colors_ss)
- cv2.imshow("Semantic Segmentation", seg_cv)
-
- ins_seg = state["ins_seg"]
- ins_seg_int = np.round(ins_seg[:, :, 0]).astype(np.int32)
- ins_seg_cv = segmentation_to_rgb(ins_seg_int, MAX_INSTANCE_COUNT, colors=colors_is)
- cv2.imshow("Instance Segmentation", ins_seg_cv)
-
- env.close()
-
-
-if __name__ == "__main__":
- main()
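
The segmentation observations come back as float images, so the visualization step above first rounds them to integer class IDs and then maps them to colors with `segmentation_to_rgb`. A sketch of that conversion for the semantic channel (the instance channel works the same way with `MAX_INSTANCE_COUNT`; the helper name is illustrative):

```python
import numpy as np

from igibson.utils.constants import MAX_CLASS_COUNT
from igibson.utils.vision_utils import randomize_colors, segmentation_to_rgb


def seg_to_displayable_rgb(seg, colors=None):
    """Convert the float 'seg' observation of shape (H, W, 1) into an RGB image."""
    if colors is None:
        colors = randomize_colors(MAX_CLASS_COUNT, bright=True)
    labels = np.round(seg[:, :, 0]).astype(np.int32)  # float channel -> integer class IDs
    return segmentation_to_rgb(labels, MAX_CLASS_COUNT, colors=colors)
```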
diff --git a/igibson/examples/observations/generate_colored_dense_pointcloud.py b/igibson/examples/observations/generate_colored_dense_pointcloud.py
deleted file mode 100644
index 35042f478..000000000
--- a/igibson/examples/observations/generate_colored_dense_pointcloud.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import logging
-import os
-from sys import platform
-
-import matplotlib.pyplot as plt
-import numpy as np
-import yaml
-from mpl_toolkits.mplot3d import Axes3D
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-
-
-def main():
- """
- Example of rendering and visualizing a single 3D dense pointcloud
-    Loads Rs_int and a robot and renders a dense panorama depth map from the robot's camera
- It plots the point cloud with matplotlib, colored with the RGB values
- It also generates semantic segmentation
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
-
- # Create iGibsonEnvironment with the Fetch rearrangement config
- mode = "headless"
- config = os.path.join(igibson.example_config_path, "fetch_rearrangement.yaml")
- config_data = yaml.load(open(config, "r"), Loader=yaml.FullLoader)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- scene_id = "Rs_int"
- nav_env = iGibsonEnv(
- config_file=config_data, mode=mode, scene_id=scene_id, action_timestep=1.0 / 120.0, physics_timestep=1.0 / 120.0
- )
-
- # Generate data ####################################
- # First set the robot in a random point in the scene
- point = nav_env.scene.get_random_point()[1]
- nav_env.robots[0].set_position(point)
- nav_env.simulator.sync()
-
- # Then generate RGB, semantic segmentation and 3D/depth, in panorama format
- pano_rgb = nav_env.simulator.renderer.get_equi(mode="rgb", use_robot_camera=True)
- pano_seg = nav_env.simulator.renderer.get_equi(mode="seg", use_robot_camera=True)
- pano_3d = nav_env.simulator.renderer.get_equi(mode="3d", use_robot_camera=True)
- depth = np.linalg.norm(pano_3d[:, :, :3], axis=2)
- theta = -np.arange(-np.pi / 2, np.pi / 2, np.pi / 128.0)[:, None]
- phi = np.arange(-np.pi, np.pi, 2 * np.pi / 256)[None, :]
- # depth = depth / np.cos(theta)
- x = np.cos(theta) * np.sin(phi) * depth
- y = np.sin(theta) * depth
- z = np.cos(theta) * np.cos(phi) * depth
-
- # Select only a few points
- mask = np.random.uniform(size=(128, 256)) > 0.5
-
- pts = (np.stack([x[mask], y[mask], z[mask]]).T).astype(np.float32)
- color = (pano_rgb[mask][:, :3]).astype(np.float32)
- label = (pano_seg[mask][:, 0]).astype(np.int32)
-
- assert len(pts) == len(label)
-
- # Create visualization: 3D points with RGB color
- fig = plt.figure()
- ax = Axes3D(fig)
- ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], s=3, c=color[:, :3])
- plt.show()
-
-
-if __name__ == "__main__":
- main()
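
The core of this example is converting an equirectangular 3D/depth panorama into Cartesian points via the spherical angles of each pixel. A sketch of that conversion, assuming a panorama of shape (height, width, >=3) as returned by `renderer.get_equi(mode="3d", ...)`; it uses `np.linspace` instead of the original `np.arange` only to make the row and column counts explicit:

```python
import numpy as np


def equirect_depth_to_points(pano_3d):
    """Convert an equirectangular 3D panorama into an (H, W, 3) grid of Cartesian points."""
    height, width = pano_3d.shape[:2]
    depth = np.linalg.norm(pano_3d[:, :, :3], axis=2)
    # Per-row elevation and per-column azimuth of the panorama pixels.
    theta = -np.linspace(-np.pi / 2, np.pi / 2, height, endpoint=False)[:, None]
    phi = np.linspace(-np.pi, np.pi, width, endpoint=False)[None, :]
    x = np.cos(theta) * np.sin(phi) * depth
    y = np.sin(theta) * depth
    z = np.cos(theta) * np.cos(phi) * depth
    return np.stack([x, y, z], axis=-1)
```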
diff --git a/igibson/examples/observations/generate_lidar_colored_pointcloud.py b/igibson/examples/observations/generate_lidar_colored_pointcloud.py
deleted file mode 100644
index 602d1993f..000000000
--- a/igibson/examples/observations/generate_lidar_colored_pointcloud.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import logging
-import os
-from sys import platform
-
-import matplotlib.pyplot as plt
-import numpy as np
-import yaml
-from mpl_toolkits.mplot3d import Axes3D
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-
-
-def get_lidar_sampling_pattern():
- lidar_vertical_low = -15 / 180.0 * np.pi
- lidar_vertical_high = 15 / 180.0 * np.pi
- lidar_vertical_n_beams = 16
- lidar_vertical_beams = np.arange(
- lidar_vertical_low,
- lidar_vertical_high + (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),
- (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),
- )
-
- lidar_horizontal_low = -45 / 180.0 * np.pi
- lidar_horizontal_high = 45 / 180.0 * np.pi
- lidar_horizontal_n_beams = 468
- lidar_horizontal_beams = np.arange(
- lidar_horizontal_low,
- lidar_horizontal_high,
- (lidar_horizontal_high - lidar_horizontal_low) / (lidar_horizontal_n_beams),
- )
-
- xx, yy = np.meshgrid(lidar_vertical_beams, lidar_horizontal_beams)
- xx = xx.flatten()
- yy = yy.flatten()
-
- height = 128
-
-    x_samples = (np.tan(xx) / np.cos(yy) * height // 2 + height // 2).astype(np.int64)
-    y_samples = (np.tan(yy) * height // 2 + height // 2).astype(np.int64)
-
- x_samples = x_samples.flatten()
- y_samples = y_samples.flatten()
- return x_samples, y_samples
-
-
-x_samples, y_samples = get_lidar_sampling_pattern()
-
-
-def generate_data_lidar(nav_env, num_samples=3):
-
- rgb_all = []
- lidar_all = []
- lidar_all_2 = []
- label_all = []
-
- # Set initial point (not used)
- point = nav_env.scene.get_random_point()[1]
-
- for _ in range(num_samples):
- # Sample a new point
- new_point = nav_env.scene.get_random_point()[1]
- # If it is too far away, sample again
- while np.linalg.norm(new_point - point) > 1:
- new_point = nav_env.scene.get_random_point()[1]
-
- # Get vector of distance and change to (y, z, x)
- delta_pos = new_point - point
- delta_pos = np.array([delta_pos[1], delta_pos[2], delta_pos[0]])
-
- # Set new robot's position
- nav_env.robots[0].set_position(new_point)
-
- # Get observations (panorama RGB, 3D/Depth and semantic segmentation)
- pano_rgb = nav_env.simulator.renderer.get_cube(mode="rgb", use_robot_camera=True)
- pano_3d = nav_env.simulator.renderer.get_cube(mode="3d", use_robot_camera=True)
- pano_seg = nav_env.simulator.renderer.get_cube(mode="seg", use_robot_camera=True)
-
- r3 = np.array(
- [[np.cos(-np.pi / 2), 0, -np.sin(-np.pi / 2)], [0, 1, 0], [np.sin(-np.pi / 2), 0, np.cos(-np.pi / 2)]]
- )
- transformatiom_matrix = np.eye(3)
-
- for i in range(4):
- lidar_all.append(pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] - delta_pos[None, :])
- rgb_all.append(pano_rgb[i][:, :, :3][x_samples, y_samples])
- label_all.append(pano_seg[i][:, :, 0][x_samples, y_samples] * 255.0)
- lidar_all_2.append(
- pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] * 0.9 - delta_pos[None, :]
- )
- transformation_matrix = r3.dot(transformation_matrix)
-
- lidar_all = np.concatenate(lidar_all, 0).astype(np.float32)
- lidar_all_2 = np.concatenate(lidar_all_2, 0).astype(np.float32)
- rgb_all = np.concatenate(rgb_all, 0).astype(np.float32)
- label_all = np.concatenate(label_all, 0).astype(np.int32)
-
- assert len(lidar_all) == len(label_all)
-
- direction = lidar_all - lidar_all_2
- direction = direction / (np.linalg.norm(direction, axis=1)[:, None] + 1e-5)
-
- return lidar_all, direction, rgb_all, label_all
-
-
-def main():
- """
- Example of rendering and visualizing a single lidar-like pointcloud
- Loads Rs (non-interactive) and a robot, and renders a dense panorama depth map from the robot's camera
- Samples the depth map with a lidar-like pattern
- It plots the point cloud with matplotlib, colored with the RGB values
- It also generates segmentation and "direction"
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
-
- # Create environment
- mode = "headless"
- scene_id = "Rs_int"
- config = os.path.join(igibson.example_config_path, "fetch_rearrangement.yaml")
- config_data = yaml.load(open(config, "r"), Loader=yaml.FullLoader)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- config_data["texture_scale"] = 0.5
- nav_env = iGibsonEnv(
- config_file=config_data, mode=mode, scene_id=scene_id, action_timestep=1.0 / 120.0, physics_timestep=1.0 / 120.0
- )
-
- # Generate data
- pts, direction, color, label = generate_data_lidar(nav_env)
-
- # Create visualization: 3D points with RGB color
- fig = plt.figure()
- ax = Axes3D(fig)
- ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], s=3, c=color[:, :3])
- plt.show()
-
-
-if __name__ == "__main__":
- main()
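
The deleted helper above converts LiDAR beam angles into pixel indices on a square panorama face via a perspective projection. A condensed, standalone sketch of that mapping (the function name and default arguments are illustrative, not part of the original API):

    import numpy as np

    def lidar_sampling_pattern(height=128, v_fov_deg=(-15, 15), n_v=16, h_fov_deg=(-45, 45), n_h=468):
        """Map lidar beam angles onto pixel indices of a square image of side `height`."""
        v = np.linspace(np.radians(v_fov_deg[0]), np.radians(v_fov_deg[1]), n_v)
        h = np.radians(h_fov_deg[0]) + np.arange(n_h) * (np.radians(h_fov_deg[1]) - np.radians(h_fov_deg[0])) / n_h
        xx, yy = np.meshgrid(v, h)
        # Same projection as in the example: vertical angle scaled by 1/cos(horizontal angle).
        x_px = (np.tan(xx) / np.cos(yy) * height // 2 + height // 2).astype(np.int32)
        y_px = (np.tan(yy) * height // 2 + height // 2).astype(np.int32)
        return x_px.ravel(), y_px.ravel()
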
diff --git a/igibson/examples/observations/generate_topdown_semseg.py b/igibson/examples/observations/generate_topdown_semseg.py
deleted file mode 100644
index 7e5c47a6c..000000000
--- a/igibson/examples/observations/generate_topdown_semseg.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import colorsys
-import logging
-
-import cv2
-import matplotlib.cm as cm
-import matplotlib.pyplot as plt
-import numpy as np
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-from igibson.utils.constants import MAX_CLASS_COUNT, MAX_INSTANCE_COUNT
-from igibson.utils.vision_utils import segmentation_to_rgb
-
-
-def main():
- """
- Example of generating a topdown semantic segmentation map
- Loads Rs_int (interactive) with all or some objects (code can be commented)
- and renders a semantic segmentation image top-down
- This is also an example of how to load a scene without the ceiling to facilitate creating visualizations
- Quit with q
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.01)
- s = Simulator(mode="headless", image_width=512, image_height=512, device_idx=0, rendering_settings=settings)
- scene = InteractiveIndoorScene(
- "Rs_int",
- texture_randomization=False,
- object_randomization=False,
- # load_object_categories=[
- # "breakfast_table",
- # "carpet",
- # "sofa",
- # "bottom_cabinet",
- # "sink",
- # "stove",
- # "fridge",
- # ],
- not_load_object_categories=["ceilings"],
- )
- s.import_scene(scene)
- camera_pose = np.array([0, 0, 6.0])
- view_direction = np.array([0, 0, -1.0])
- s.renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 1, 0])
- seg = s.renderer.render(modes=("seg",))[0]
- seg_int = np.round(seg[:, :, 0] * MAX_CLASS_COUNT).astype(np.int32)
- seg_color = segmentation_to_rgb(seg_int, MAX_CLASS_COUNT)
-
- while True:
- cv2.imshow("Topdown Semantic Segmentation", seg_color)
- q = cv2.waitKey(1)
- if q == ord("q"):
- break
-
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
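
The "seg" modality used above returns class ids normalized to [0, 1]; turning a rendered frame back into integer labels and a color visualization follows the same two steps as the example (a sketch only, reusing the imports shown above):

    import numpy as np

    from igibson.utils.constants import MAX_CLASS_COUNT
    from igibson.utils.vision_utils import segmentation_to_rgb

    def seg_frame_to_labels(seg_frame):
        """Recover integer class ids and an RGB visualization from a normalized seg render."""
        seg_int = np.round(seg_frame[:, :, 0] * MAX_CLASS_COUNT).astype(np.int32)
        return seg_int, segmentation_to_rgb(seg_int, MAX_CLASS_COUNT)
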
diff --git a/igibson/examples/renderer/__init__.py b/igibson/examples/renderer/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/igibson/examples/renderer/mesh_renderer_pano_example.py b/igibson/examples/renderer/mesh_renderer_pano_example.py
deleted file mode 100644
index 65f169d14..000000000
--- a/igibson/examples/renderer/mesh_renderer_pano_example.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import logging
-import os
-import sys
-
-import cv2
-import numpy as np
-
-from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.profiler import Profiler
-from igibson.utils.assets_utils import get_scene_path
-
-
-def main():
- """
- Creates a renderer and renders panorama images in Rs (non-interactive). No physics.
- The camera view can be controlled.
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- global _mouse_ix, _mouse_iy, down, view_direction
-
- # If a model is given, we load it; otherwise we load the Rs mesh (non-interactive)
- if len(sys.argv) > 1:
- model_path = sys.argv[1]
- else:
- model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj")
-
- # Create renderer object and load the scene model
- settings = MeshRendererSettings(enable_pbr=False)
- renderer = MeshRenderer(width=512, height=512, rendering_settings=settings)
- renderer.load_object(model_path)
- renderer.add_instance_group([0])
-
- # Print some information about the loaded model
- logging.info("Visual objects: %s Instances: %s", renderer.visual_objects, renderer.instances)
- logging.info("Materials: %s Shape material indices: %s", renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx)
-
- # Create a simple viewer with OpenCV and a keyboard navigation
- px = 0
- py = 0.2
- camera_pose = np.array([px, py, 0.5])
- view_direction = np.array([0, -1, -0.3])
- renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
- renderer.set_fov(90)
- _mouse_ix, _mouse_iy = -1, -1
- down = False
-
- # Define the function callback for OpenCV events on the window
- def change_dir(event, x, y, flags, param):
- global _mouse_ix, _mouse_iy, down, view_direction
- if event == cv2.EVENT_LBUTTONDOWN:
- _mouse_ix, _mouse_iy = x, y
- down = True
- if event == cv2.EVENT_MOUSEMOVE:
- if down:
- dx = (x - _mouse_ix) / 100.0
- dy = (y - _mouse_iy) / 100.0
- _mouse_ix = x
- _mouse_iy = y
- r1 = np.array([[np.cos(dy), 0, np.sin(dy)], [0, 1, 0], [-np.sin(dy), 0, np.cos(dy)]])
- r2 = np.array([[np.cos(-dx), -np.sin(-dx), 0], [np.sin(-dx), np.cos(-dx), 0], [0, 0, 1]])
- view_direction = r1.dot(r2).dot(view_direction)
- elif event == cv2.EVENT_LBUTTONUP:
- down = False
-
- cv2.namedWindow("Viewer")
- cv2.setMouseCallback("Viewer", change_dir)
-
- cv2.namedWindow("Panorama Viewer")
-
- # Move camera and render normal and panorama images
- while True:
- with Profiler("Render"):
- frame = renderer.render(modes=("rgb",))
- # Actual panorama image stuff
- img = renderer.get_equi()
-
- cv2.imshow("Viewer", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
- cv2.imshow("Panorama Viewer", img)
-
- q = cv2.waitKey(1)
- if q == ord("w"):
- px += 0.01
- elif q == ord("s"):
- px -= 0.01
- elif q == ord("a"):
- py += 0.01
- elif q == ord("d"):
- py -= 0.01
- elif q == ord("q"):
- break
- camera_pose = np.array([px, py, 0.5])
- renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
-
- # Cleanup
- renderer.release()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/robots/__init__.py b/igibson/examples/robots/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/igibson/examples/robots/differential_drive_control_example.py b/igibson/examples/robots/differential_drive_control_example.py
deleted file mode 100644
index b9abc36e7..000000000
--- a/igibson/examples/robots/differential_drive_control_example.py
+++ /dev/null
@@ -1,95 +0,0 @@
-"""
-This demo shows how to use keyboard to control a two-wheeled robot
-"""
-import argparse
-import os
-from types import SimpleNamespace
-
-import numpy as np
-import pybullet as p
-import pybullet_data
-
-import igibson
-from igibson.objects.articulated_object import URDFObject
-from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
-from igibson.robots import REGISTERED_ROBOTS, TwoWheelRobot
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path
-from igibson.utils.utils import parse_config
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument(
- "--robot",
- type=str,
- default="Turtlebot",
- choices=list(REGISTERED_ROBOTS.keys()),
- help="Robot to use. Note that the selected robot must support differential drive!",
-)
-
-args = parser.parse_args()
-
-
-def main(args):
- rendering_settings = MeshRendererSettings(
- optimized=True,
- fullscreen=False,
- enable_shadow=True,
- enable_pbr=True,
- msaa=True,
- light_dimming_factor=1.0,
- )
- s = Simulator(
- mode="headless", use_pb_gui=True, rendering_settings=rendering_settings, image_height=512, image_width=512
- )
-
- scene = EmptyScene()
- s.scene = scene
- scene.objects_by_id = {}
-
- s.import_scene(scene)
- p.setAdditionalSearchPath(pybullet_data.getDataPath())
-
- agent = REGISTERED_ROBOTS[args.robot](action_type="continuous")
-
- s.import_robot(agent)
-
- # Make sure robot is a two wheeled robot and using differential drive
- assert isinstance(agent, TwoWheelRobot), "Robot must be a TwoWheelRobot!"
- assert (
- agent.controller_config["base"]["name"] == "DifferentialDriveController"
- ), "Robot must be using differential drive control for its base"
-
- agent.reset()
- i = 0
- actions = []
- print("Press ctrl-q to quit")
- while True:
- i += 1
- action = np.zeros(agent.action_dim)
-
- # 0 - forward/backwards (good) (up/down)
- # 1 - rotate robot base (good) (left/right)
-
- events = p.getKeyboardEvents()
- if 65295 in events:
- action[1] += -0.10
- if 65296 in events:
- action[1] += 0.10
- if 65297 in events:
- action[0] += 0.2
- if 65298 in events:
- action[0] += -0.2
- if 65307 in events and 113 in events:
- break
-
- agent.apply_action(action)
- actions.append(action)
- p.stepSimulation()
-
- s.disconnect()
-
-
-if __name__ == "__main__":
- main(args)
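
The hard-coded key codes in the loop above appear to correspond to pybullet's arrow-key constants; a minimal sketch of the same mapping using the named constants (assumes an existing pybullet GUI connection; step sizes as in the example):

    import numpy as np
    import pybullet as p

    def keyboard_to_diff_drive_action(lin_step=0.2, ang_step=0.1):
        """Build a [linear, angular] differential-drive action from pybullet keyboard events."""
        action = np.zeros(2)
        keys = p.getKeyboardEvents()
        if p.B3G_UP_ARROW in keys:     # forward
            action[0] += lin_step
        if p.B3G_DOWN_ARROW in keys:   # backward
            action[0] -= lin_step
        if p.B3G_LEFT_ARROW in keys:   # rotate one way (65295 in the example)
            action[1] -= ang_step
        if p.B3G_RIGHT_ARROW in keys:  # rotate the other way (65296 in the example)
            action[1] += ang_step
        return action
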
diff --git a/igibson/examples/robots/ik_control_example.py b/igibson/examples/robots/ik_control_example.py
deleted file mode 100644
index fae6353d2..000000000
--- a/igibson/examples/robots/ik_control_example.py
+++ /dev/null
@@ -1,182 +0,0 @@
-"""
-This demo shows how to use keyboard to control a Fetch robot via IK
-"""
-import argparse
-import os
-from types import SimpleNamespace
-
-import numpy as np
-import pybullet as p
-import pybullet_data
-
-import igibson
-from igibson.objects.articulated_object import URDFObject
-from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
-from igibson.robots.fetch import Fetch
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path
-from igibson.utils.utils import parse_config
-
-
-def main():
- rendering_settings = MeshRendererSettings(
- optimized=True,
- fullscreen=False,
- enable_shadow=True,
- enable_pbr=True,
- msaa=True,
- light_dimming_factor=1.0,
- )
- s = Simulator(
- mode="headless", use_pb_gui=True, rendering_settings=rendering_settings, image_height=512, image_width=512
- )
- p.configureDebugVisualizer(p.COV_ENABLE_GUI, enable=0)
-
- scene = EmptyScene()
- s.scene = scene
- scene.objects_by_id = {}
-
- s.import_scene(scene)
- p.setAdditionalSearchPath(pybullet_data.getDataPath())
-
- config = parse_config(os.path.join(igibson.root_path, "examples", "configs", "behavior_onboard_sensing_fetch.yaml"))
- vr_agent = Fetch(config)
- s.import_robot(vr_agent)
-
- table_objects_to_load = {
- "table_1": {
- "category": "breakfast_table",
- "model": "1b4e6f9dd22a8c628ef9d976af675b86",
- "pos": (1.500000, 0.000000, 0.35),
- "orn": (0, 0, 1, 1),
- },
- "coffee_cup_1": {
- "category": "coffee_cup",
- "model": "coffee_cup_000",
- "pos": (1.5, 0.20, 0.8),
- "orn": (0, 0, 0, 1),
- },
- "plate_1": {
- "category": "plate",
- "model": "plate_000",
- "pos": (1.5, -0.20, 0.8),
- "orn": (0, 0, 0, 1),
- },
- }
-
- avg_category_spec = get_ig_avg_category_specs()
-
- scene_objects = {}
- for obj in table_objects_to_load.values():
- category = obj["category"]
- if category in scene_objects:
- scene_objects[category] += 1
- else:
- scene_objects[category] = 1
-
- category_path = get_ig_category_path(category)
- if "model" in obj:
- model = obj["model"]
- else:
- model = np.random.choice(os.listdir(category_path))
- model_path = get_ig_model_path(category, model)
- filename = os.path.join(model_path, model + ".urdf")
- obj_name = "{}_{}".format(category, scene_objects[category])
-
- simulator_obj = URDFObject(
- filename,
- name=obj_name,
- category=category,
- model_path=model_path,
- avg_obj_dims=avg_category_spec.get(category),
- fit_avg_dim_volume=True,
- texture_randomization=False,
- overwrite_inertial=True,
- initial_pos=obj["pos"],
- initial_orn=[0, 0, 90],
- )
- s.import_object(simulator_obj)
- simulator_obj.set_orientation(obj["orn"])
-
- vr_agent.reset()
- i = 0
- actions = []
- print("Press ctrl-q to quit")
- while True:
- i += 1
- action = np.zeros(11)
- action[-1] = -1.0
-
- # 0 - forward/backwards (good) (up/down)
- # 1 - rotate robot base (good) (left/right)
- # 2 - turn head left/right (bad) (a/d)
- # * after reaching end of rotation, causes:
- # * arm to move, and then body to decrease height (-0.1/0.1)
- # 3 - turn head up/down (good) (r/f)
- # 4 - move gripper towards/away from robot (good) (y/h)
- # 5 - move gripper left/right (good) (j/l)
- # 6 - move gripper down/up (bad) (i/k)
- # * also moves body
- # 7 - rotate gripper clockwise/counter clockwise in Z plane (u/o)
- # * also moves body
- # 8 - rotate gripper towards/out from body (x plane) ([/')
- # * also moves body
- # 9 - rotate gripper along plane of floor (y plane)
- # * also moves body
- # 10 - close/open gripper (good) (z/x)
-
- events = p.getKeyboardEvents()
- if 65295 in events:
- action[1] += -0.1
- if 65296 in events:
- action[1] += 0.1
- if 65297 in events:
- action[0] += 0.1
- if 65298 in events:
- action[0] += -0.1
- if 97 in events:
- action[2] += 0.1
- if 100 in events:
- action[2] += -0.1
- if 114 in events:
- action[3] += 0.1
- if 102 in events:
- action[3] += -0.1
- if 122 in events:
- action[10] += 0.1
- if 120 in events:
- action[10] += -0.1
- if 105 in events:
- action[6] += 0.1
- if 107 in events:
- action[6] += -0.1
- if 106 in events:
- action[5] += 0.1
- if 108 in events:
- action[5] += -0.1
- if 117 in events:
- action[7] += 0.1
- if 111 in events:
- action[7] += -0.1
- if 121 in events:
- action[4] += 0.1
- if 104 in events:
- action[4] += -0.1
- if 91 in events:
- action[8] += 0.1
- if 39 in events:
- action[8] += -0.1
- if 65307 in events and 113 in events:
- break
-
- action *= 5
- vr_agent.apply_action(action)
- actions.append(action)
- p.stepSimulation()
-
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/robots/robot_example.py b/igibson/examples/robots/robot_example.py
deleted file mode 100644
index b801de7c4..000000000
--- a/igibson/examples/robots/robot_example.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import logging
-import os
-
-import numpy as np
-
-import igibson
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.robots import REGISTERED_ROBOTS
-from igibson.scenes.empty_scene import EmptyScene
-from igibson.simulator import Simulator
-from igibson.utils.utils import parse_config
-
-
-def main():
- """
- Robot demo
- Loads all robots in an empty scene and applies random actions to each
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- # Create empty scene
- settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- s.import_scene(scene)
-
- # Create one instance of each robot, one at a time, in the empty scene
- position = [0, 0, 0]
- robots = {}
- for robot_config_file in os.listdir(os.path.join(igibson.example_config_path, "robots")):
- config = parse_config(os.path.join(igibson.example_config_path, "robots", robot_config_file))
- robot_config = config["robot"]
- robot_name = robot_config.pop("name")
- robot = REGISTERED_ROBOTS[robot_name](**robot_config)
- s.import_robot(robot)
- robot.set_position(position)
- robot.reset()
- robot.keep_still()
- robots[robot_name] = (robot, position[1])
- logging.info("Loaded " + robot_name)
- logging.info("Moving " + robot_name)
- # Set viewer in front
- s.viewer.initial_pos = [1.6, 0, 1.3]
- s.viewer.initial_view_direction = [-0.7, 0, -0.7]
- s.viewer.reset_viewer()
-
- for _ in range(100): # keep still for 10 seconds
- s.step()
-
- for _ in range(30):
- action = np.random.uniform(-1, 1, robot.action_dim)
- robot.apply_action(action)
- for _ in range(10):
- s.step()
-
- robot.keep_still()
- s.reload()
- scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
- s.import_scene(scene)
-
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
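
The per-robot loop above boils down to a small, repeatable pattern: read the example config, look up the class in REGISTERED_ROBOTS, and import it into the simulator. A sketch of that pattern (the config file name in the usage comment is hypothetical):

    import os

    import igibson
    from igibson.robots import REGISTERED_ROBOTS
    from igibson.utils.utils import parse_config

    def load_robot_from_example_config(simulator, config_file):
        """Instantiate a registered robot from an example YAML config and import it."""
        config = parse_config(os.path.join(igibson.example_config_path, "robots", config_file))
        robot_config = config["robot"]
        robot_name = robot_config.pop("name")
        robot = REGISTERED_ROBOTS[robot_name](**robot_config)
        simulator.import_robot(robot)
        robot.reset()
        robot.keep_still()
        return robot

    # Usage (hypothetical file name): robot = load_robot_from_example_config(s, "fetch.yaml")
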
diff --git a/igibson/examples/ros/__init__.py b/igibson/examples/ros/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/igibson/examples/ros/igibson-ros/turtlebot_rgbd.yaml b/igibson/examples/ros/igibson-ros/turtlebot_rgbd.yaml
index fb60c9656..e36e9a69c 100644
--- a/igibson/examples/ros/igibson-ros/turtlebot_rgbd.yaml
+++ b/igibson/examples/ros/igibson-ros/turtlebot_rgbd.yaml
@@ -72,5 +72,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/igibson/examples/scenes/__init__.py b/igibson/examples/scenes/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/igibson/examples/scenes/g_scene_selector.py b/igibson/examples/scenes/g_scene_selector.py
deleted file mode 100644
index d9ded2338..000000000
--- a/igibson/examples/scenes/g_scene_selector.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import logging
-from sys import platform
-
-import numpy as np
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.profiler import Profiler
-from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_available_g_scenes
-from igibson.utils.utils import let_user_pick
-
-
-def main():
- """
- Prompts the user to select any available non-interactive scene and loads it.
- Shows how to load scenes directly, without the Environment interface
- Shows how to sample points in the scene and how to compute geodesic distance and the shortest path
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- available_g_scenes = get_available_g_scenes()
- scene_id = available_g_scenes[let_user_pick(available_g_scenes) - 1]
- settings = MeshRendererSettings(enable_shadow=True, msaa=False)
- # Reduce texture scale for Mac.
- if platform == "darwin":
- settings.texture_scale = 0.5
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
-
- scene = StaticIndoorScene(
- scene_id,
- build_graph=True,
- )
- s.import_scene(scene)
-
- # Shows how to sample points in the scene
- np.random.seed(0)
- for _ in range(10):
- random_floor = scene.get_random_floor()
- p1 = scene.get_random_point(random_floor)[1]
- p2 = scene.get_random_point(random_floor)[1]
- shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True)
- logging.info("Random point 1: {}".format(p1))
- logging.info("Random point 2: {}".format(p2))
- logging.info("Geodesic distance between p1 and p2: {}".format(geodesic_distance))
- logging.info("Shortest path from p1 to p2: {}".format(shortest_path))
-
- input("Press enter")
-
- while True:
- with Profiler("Simulator step"):
- s.step()
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/scenes/ig_scene_selector.py b/igibson/examples/scenes/ig_scene_selector.py
deleted file mode 100644
index de46236f8..000000000
--- a/igibson/examples/scenes/ig_scene_selector.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import logging
-from sys import platform
-
-import numpy as np
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.profiler import Profiler
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-from igibson.utils.assets_utils import get_available_ig_scenes
-from igibson.utils.utils import let_user_pick
-
-
-def main():
- """
- Prompts the user to select any available interactive scene and loads it.
- Shows how to load scenes directly, without the Environment interface
- Shows how to sample points in the scene by room type and how to compute geodesic distance and the shortest path
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- available_ig_scenes = get_available_ig_scenes()
- scene_id = available_ig_scenes[let_user_pick(available_ig_scenes) - 1]
- settings = MeshRendererSettings(enable_shadow=True, msaa=False)
- if platform == "darwin":
- settings.texture_scale = 0.5
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
-
- scene = InteractiveIndoorScene(
- scene_id,
- load_object_categories=[], # To load only the building. Fast
- build_graph=True,
- )
- s.import_scene(scene)
-
- # Shows how to sample points in the scene
- np.random.seed(0)
- for _ in range(10):
- pt = scene.get_random_point_by_room_type("living_room")[1]
- logging.info("Random point in living_room: {}".format(pt))
-
- for _ in range(10):
- random_floor = scene.get_random_floor()
- p1 = scene.get_random_point(random_floor)[1]
- p2 = scene.get_random_point(random_floor)[1]
- shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True)
- logging.info("Random point 1: {}".format(p1))
- logging.info("Random point 2: {}".format(p2))
- logging.info("Geodesic distance between p1 and p2: {}".format(geodesic_distance))
- logging.info("Shortest path from p1 to p2: {}".format(shortest_path))
-
- input("Press enter")
-
- while True:
- with Profiler("Simulator step"):
- s.step()
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
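
Both selector examples rely on the same scene-sampling API; condensed into a sketch (the scene must be constructed with build_graph=True, as above):

    import numpy as np

    def sample_shortest_path(scene, seed=0):
        """Sample two points on a random floor and return the shortest path and geodesic distance."""
        np.random.seed(seed)
        floor = scene.get_random_floor()
        p1 = scene.get_random_point(floor)[1]
        p2 = scene.get_random_point(floor)[1]
        path, geodesic_distance = scene.get_shortest_path(floor, p1[:2], p2[:2], entire_path=True)
        return path, geodesic_distance
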
diff --git a/igibson/examples/scenes/scene_object_rand_example.py b/igibson/examples/scenes/scene_object_rand_example.py
deleted file mode 100644
index 61e8e75e0..000000000
--- a/igibson/examples/scenes/scene_object_rand_example.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import logging
-from sys import platform
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-
-
-def main():
- """
- Example of randomization of the object models in a scene
- Loads Rs_int (interactive) and periodically randomizes which object models are loaded
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- settings = MeshRendererSettings(enable_shadow=True, msaa=False)
- if platform == "darwin":
- settings.texture_scale = 0.5
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- # load_object_categories = None
- load_object_categories = [
- "bottom_cabinet",
- "sofa",
- "rocking_chair",
- "swivel_chair",
- "folding_chair",
- "desk",
- "dining_table",
- "coffee_table",
- "breakfast_table",
- ] # Change to None to load the full house
- scene = InteractiveIndoorScene(
- "Rs_int",
- texture_randomization=False,
- load_object_categories=load_object_categories,
- object_randomization=True,
- )
- s.import_scene(scene)
- loaded = True
-
- for i in range(1, 10000):
- if not loaded:
- logging.info("Randomize object models")
-
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- scene = InteractiveIndoorScene(
- "Rs_int",
- texture_randomization=False,
- load_object_categories=load_object_categories,
- object_randomization=True,
- )
- s.import_scene(scene)
- loaded = True
-
- if i % 1000 == 0:
- s.reload()
- loaded = False
-
- s.step()
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/scenes/scene_partial_loading_example.py b/igibson/examples/scenes/scene_partial_loading_example.py
deleted file mode 100644
index 417f3c24b..000000000
--- a/igibson/examples/scenes/scene_partial_loading_example.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import logging
-from sys import platform
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-
-
-def main():
- """
- Example of partial loading of a scene
- Loads only some objects (by category) and in some room types
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- settings = MeshRendererSettings(enable_shadow=True, msaa=False)
- if platform == "darwin":
- settings.texture_scale = 0.5
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- scene = InteractiveIndoorScene(
- "Rs_int",
- texture_randomization=False,
- object_randomization=False,
- load_object_categories=["swivel_chair"],
- load_room_types=["living_room"],
- )
- s.import_scene(scene)
-
- while True:
- s.step()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/scenes/scene_texture_rand_example.py b/igibson/examples/scenes/scene_texture_rand_example.py
deleted file mode 100644
index 9dc8f1d95..000000000
--- a/igibson/examples/scenes/scene_texture_rand_example.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import logging
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-
-
-def main():
- """
- Example of randomization of the texture in a scene
- Loads Rs_int (interactive) and randomizes the texture of the objects
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
- settings = MeshRendererSettings(enable_shadow=False, msaa=False)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
- scene = InteractiveIndoorScene(
- "Rs_int",
- load_object_categories=[], # To load only the building. Fast
- texture_randomization=True,
- object_randomization=False,
- )
- s.import_scene(scene)
-
- for i in range(10000):
- if i % 1000 == 0:
- logging.info("Randomize texture")
- scene.randomize_texture()
- s.step()
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/scenes/stadium_example.py b/igibson/examples/scenes/stadium_example.py
deleted file mode 100644
index 465c6a4fa..000000000
--- a/igibson/examples/scenes/stadium_example.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import logging
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.profiler import Profiler
-from igibson.scenes.stadium_scene import StadiumScene
-from igibson.simulator import Simulator
-
-
-def main():
- """
- Loads the Stadium scene
- This scene is the default in pybullet but is not really useful in iGibson
- """
- logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
-
- settings = MeshRendererSettings(enable_shadow=False, msaa=False)
- s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
-
- scene = StadiumScene()
- s.import_scene(scene)
-
- while True:
- with Profiler("Simulator step"):
- s.step()
- s.disconnect()
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/examples/vr/__init__.py b/igibson/examples/vr/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/igibson/examples/web_ui/__init__.py b/igibson/examples/web_ui/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/igibson/external/motion/motion_planners/rrt_connect.py b/igibson/external/motion/motion_planners/rrt_connect.py
index e107678df..242f6514f 100644
--- a/igibson/external/motion/motion_planners/rrt_connect.py
+++ b/igibson/external/motion/motion_planners/rrt_connect.py
@@ -21,6 +21,7 @@ def rrt_connect(q1, q2, distance_fn, sample_fn, extend_fn, collision_fn, iterati
tree1, tree2 = nodes1, nodes2
if swap:
tree1, tree2 = nodes2, nodes1
+
s = sample_fn()
last1 = argmin(lambda n: distance_fn(n.config, s), tree1)
@@ -40,7 +41,7 @@ def rrt_connect(q1, q2, distance_fn, sample_fn, extend_fn, collision_fn, iterati
path1, path2 = last1.retrace(), last2.retrace()
if swap:
path1, path2 = path2, path1
- #print('{} iterations, {} nodes'.format(iteration, len(nodes1) + len(nodes2)))
+ print('{} iterations, {} nodes'.format(iteration, len(nodes1) + len(nodes2)))
return configs(path1[:-1] + path2[::-1])
return None
@@ -62,13 +63,13 @@ def birrt(q1, q2, distance, sample, extend, collision,
if collision(q1) or collision(q2):
return None
path = direct_path(q1, q2, extend, collision)
+
if path is not None:
return path
for attempt in irange(restarts + 1):
path = rrt_connect(q1, q2, distance, sample, extend,
collision, iterations=iterations)
if path is not None:
- #print('{} attempts'.format(attempt))
if smooth is None:
return path
return smooth_path(path, extend, collision, iterations=smooth)
diff --git a/igibson/external/motion/motion_planners/utils.py b/igibson/external/motion/motion_planners/utils.py
index 4a69a07b9..df0b9378e 100644
--- a/igibson/external/motion/motion_planners/utils.py
+++ b/igibson/external/motion/motion_planners/utils.py
@@ -8,8 +8,8 @@
INF = float('inf')
-RRT_ITERATIONS = 20
-RRT_RESTARTS = 2
+RRT_ITERATIONS = 500 #100
+RRT_RESTARTS = 10 #2
RRT_SMOOTHING = 20
diff --git a/igibson/external/pybullet_tools/utils.py b/igibson/external/pybullet_tools/utils.py
index 7bcdecd00..e53b26a8d 100644
--- a/igibson/external/pybullet_tools/utils.py
+++ b/igibson/external/pybullet_tools/utils.py
@@ -2580,7 +2580,9 @@ def approximate_as_cylinder(body, **kwargs):
#MAX_DISTANCE = 1e-3
-MAX_DISTANCE = 0.
+MAX_DISTANCE = 0.1
+
+MAX_DISTANCE_GRASP = 1e-3
def set_all_collisions(body_id, enabled=1):
@@ -2598,7 +2600,7 @@ def set_coll_filter(target_id, body_id, body_links, enable):
Sets collision filters for body - to enable or disable them
:param target_id: physics body to enable/disable collisions with
:param body_id: physics body to enable/disable collisions from
- :param body_links: physics links on body to enable/disable collisions from
+ :param body_id: physics links on body to enable/disable collisions from
:param enable: whether to enable/disable collisions
"""
target_link_idxs = [-1] + [i for i in range(p.getNumJoints(target_id))]
diff --git a/igibson/metrics/agent.py b/igibson/metrics/agent.py
index 276a0a69d..7619eca45 100644
--- a/igibson/metrics/agent.py
+++ b/igibson/metrics/agent.py
@@ -26,14 +26,14 @@ def __init__(self):
self.clip = 0.2
- def step_callback(self, env, _):
- robot = env.robots[0]
+ def step_callback(self, igbhvr_act_inst, _):
+ robot = igbhvr_act_inst.simulator.robots[0]
agent_work = {part: 0 for part in ["left_hand", "right_hand", "body"]}
agent_distance = {part: 0 for part in ["left_hand", "right_hand", "body"]}
for part in ["left_hand", "right_hand", "body"]:
self.next_state_cache[part] = {
- "position": np.array(p.getBasePositionAndOrientation(robot.links[part].get_body_id())[0]),
+ "position": np.array(p.getBasePositionAndOrientation(robot.parts[part].body_id)[0]),
}
if not self.initialized:
@@ -62,19 +62,18 @@ def step_callback(self, env, _):
self.agent_pos[part].append(list(self.state_cache[part]["position"]))
# Exclude agent teleports
delta_pos = np.clip(delta_pos, -self.clip, self.clip)
- if robot.links[part].movement_cid is None:
+ if robot.parts[part].movement_cid is None:
force = 0
work = 0
else:
- force = p.getConstraintState(robot.links[part].movement_cid)
+ force = p.getConstraintState(robot.parts[part].movement_cid)
work = np.abs((delta_pos * np.linalg.norm(force)))
distance = np.abs(delta_pos)
if part in ["left_hand", "right_hand"]:
- self.agent_local_pos[part].append(list(robot.links[part].get_local_position_orientation()[0]))
+ self.agent_local_pos[part].append(list(robot.parts[part].local_pos))
if part in ["left_hand", "right_hand"] and (
- len(p.getContactPoints(robot.links[part].get_body_id())) > 0
- or robot.links[part].object_in_hand is not None
+ len(p.getContactPoints(robot.parts[part].body_id)) > 0 or robot.parts[part].object_in_hand is not None
):
self.delta_agent_grasp_distance[part].append(distance)
self.agent_grasping[part].append(True)
@@ -133,8 +132,8 @@ def __init__(self):
self.clip = 0.2
- def step_callback(self, env, _):
- robot = env.robots[0]
+ def step_callback(self, igbhvr_act_inst, _):
+ robot = igbhvr_act_inst.simulator.robots[0]
agent_distance = {part: 0 for part in self.agent_pos}
self.next_state_cache = {
@@ -160,9 +159,9 @@ def step_callback(self, env, _):
gripper_distance = np.abs(delta_pos)
self.delta_agent_distance["gripper"].append(gripper_distance)
- self.agent_local_pos["gripper"].append(robot.get_relative_eef_position().tolist())
+ self.agent_local_pos["gripper"].append(list(robot.get_relative_eef_position()))
- contacts = p.getContactPoints(bodyA=robot.get_body_id(), linkIndexA=robot.eef_link_id)
+ contacts = p.getContactPoints(bodyA=robot.robot_ids[0], linkIndexA=robot.eef_link_id)
if len(contacts) > 0:
self.delta_agent_grasp_distance["gripper"].append(gripper_distance)
self.agent_grasping["gripper"].append(True)
diff --git a/igibson/metrics/dataset.py b/igibson/metrics/dataset.py
deleted file mode 100644
index d5a175979..000000000
--- a/igibson/metrics/dataset.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from collections import defaultdict
-
-import numpy as np
-
-from igibson.metrics.metric_base import MetricBase
-
-
-class DatasetMetric(MetricBase):
- """
- Used to transform saved demos into (state, action) pairs
- """
-
- def __init__(self):
- self.prev_state = None
- self.state_history = defaultdict(list)
-
- def start_callback(self, env, _):
- self.prev_state = env.get_state()
-
- def step_callback(self, env, log_reader):
- # when called, action is at timestep t and the state is at timestep t-1, so we need to use the previous state
- self.prev_state["action"] = log_reader.get_agent_action("vr_robot")
- for key, value in self.prev_state.items():
- self.state_history[key].append(value)
- self.prev_state = env.get_state()
-
- def gather_results(self):
- return self.state_history
diff --git a/igibson/metrics/disarrangement.py b/igibson/metrics/disarrangement.py
index bcfa7dcfd..2b6726828 100644
--- a/igibson/metrics/disarrangement.py
+++ b/igibson/metrics/disarrangement.py
@@ -21,9 +21,9 @@ def __init__(self):
self.delta_obj_disp_dict = {}
self.int_obj_disp_dict = {}
- def update_state_cache(self, env):
+ def update_state_cache(self, task):
state_cache = {}
- for obj_id, obj in env.scene.objects_by_name.items():
+ for obj_id, obj in task.scene.objects_by_name.items():
if obj.category == "agent":
continue
if type(obj) == ObjectMultiplexer:
@@ -87,9 +87,9 @@ def calculate_object_disarrangement(obj, prev_state_cache, cur_state_cache):
raise Exception
return obj_disarrangement
- def step_callback(self, env, _):
+ def step_callback(self, igbhvr_act_inst, _):
total_disarrangement = 0
- self.cur_state_cache = self.update_state_cache(env)
+ self.cur_state_cache = self.update_state_cache(igbhvr_act_inst)
if not self.initialized:
self.prev_state_cache = copy.deepcopy(self.cur_state_cache)
@@ -144,7 +144,7 @@ def __init__(self):
self.next_state_cache = {}
@staticmethod
- def cache_single_object(obj_id, obj, room_floors, env):
+ def cache_single_object(obj_id, obj, room_floors, task):
obj_cache = {}
for state_class, state in obj.states.items():
if not isinstance(state, BooleanState):
@@ -160,7 +160,7 @@ def cache_single_object(obj_id, obj, room_floors, env):
obj_cache[state_class] = relational_state_cache
else:
relational_state_cache = {}
- for target_obj_id, target_obj in env.scene.objects_by_name.items():
+ for target_obj_id, target_obj in task.scene.objects_by_name.items():
if obj_id == target_obj_id or target_obj.category == "agent":
continue
relational_state_cache[target_obj_id] = False
@@ -173,36 +173,36 @@ def cache_single_object(obj_id, obj, room_floors, env):
obj_cache[state_class] = relational_state_cache
return obj_cache
- def create_object_logical_state_cache(self, env):
+ def create_object_logical_state_cache(self, task):
room_floors = {
"room_floor_"
+ room_inst: RoomFloor(
category="room_floor",
name="room_floor_" + room_inst,
- scene=env.scene,
+ scene=task.scene,
room_instance=room_inst,
- floor_obj=env.scene.objects_by_name["floors"],
+ floor_obj=task.scene.objects_by_name["floors"],
)
- for room_inst in env.scene.room_ins_name_to_ins_id.keys()
+ for room_inst in task.scene.room_ins_name_to_ins_id.keys()
}
state_cache = {}
- for obj_id, obj in env.scene.objects_by_name.items():
+ for obj_id, obj in task.scene.objects_by_name.items():
if obj.category == "agent":
continue
state_cache[obj_id] = {}
if type(obj) == ObjectMultiplexer:
if obj.current_index == 0:
- cache_base = self.cache_single_object(obj_id, obj._multiplexed_objects[0], room_floors, env)
+ cache_base = self.cache_single_object(obj_id, obj._multiplexed_objects[0], room_floors, task)
cache_part_1 = None
cache_part_2 = None
else:
cache_base = None
cache_part_1 = self.cache_single_object(
- obj_id, obj._multiplexed_objects[1].objects[0], room_floors, env.task
+ obj_id, obj._multiplexed_objects[1].objects[0], room_floors, task
)
cache_part_2 = self.cache_single_object(
- obj_id, obj._multiplexed_objects[1].objects[1], room_floors, env.task
+ obj_id, obj._multiplexed_objects[1].objects[1], room_floors, task
)
state_cache[obj_id] = {
"base_states": cache_base,
@@ -211,7 +211,7 @@ def create_object_logical_state_cache(self, env):
"type": "multiplexer",
}
else:
- cache_base = self.cache_single_object(obj_id, obj, room_floors, env.task)
+ cache_base = self.cache_single_object(obj_id, obj, room_floors, task)
state_cache[obj_id] = {
"base_states": cache_base,
"type": "standard",
@@ -328,25 +328,25 @@ def compute_logical_disarrangement(self, object_state_cache_1, object_state_cach
"total_states": total_states,
}
- def step_callback(self, env, _):
- if not self.initialized and env.simulator.frame_count == SIMULATOR_SETTLE_TIME:
- self.initial_state_cache = self.create_object_logical_state_cache(env)
+ def step_callback(self, igbhvr_act_inst, _):
+ if not self.initialized and igbhvr_act_inst.simulator.frame_count == SIMULATOR_SETTLE_TIME:
+ self.initial_state_cache = self.create_object_logical_state_cache(igbhvr_act_inst)
self.initialized = True
else:
return
- def end_callback(self, env, _):
+ def end_callback(self, igbhvr_act_inst, _):
"""
When pybullet puts objects to sleep, getContactPoints is no longer refreshed.
Setting collision groups on the agent (which happens when users first activate the agent)
wipes the active collision groups. To get the logical disarrangement, we must wake up all objects in the scene.
This can only be done at the end of the scene so as not to affect determinism.
"""
- for obj in env.scene.objects_by_name.values():
+ for obj in igbhvr_act_inst.scene.objects_by_name.values():
obj.force_wakeup()
- env.simulator.step()
+ igbhvr_act_inst.simulator.step()
- self.cur_state_cache = self.create_object_logical_state_cache(env)
+ self.cur_state_cache = self.create_object_logical_state_cache(igbhvr_act_inst)
self.relative_logical_disarrangement = self.compute_logical_disarrangement(
self.initial_state_cache, self.cur_state_cache
diff --git a/igibson/metrics/gaze.py b/igibson/metrics/gaze.py
index eaade67cb..b2caa1ba8 100644
--- a/igibson/metrics/gaze.py
+++ b/igibson/metrics/gaze.py
@@ -32,14 +32,16 @@ def __init__(self):
self.obj_info_map = None
self.gaze_marker = None
- def start_callback(self, env, _):
- self.name_to_category = {obj.name: obj.category for obj in env.scene.objects_by_name.values()}
- self.task_obj_info = {obj.name: obj.category for obj in env.task.object_scope.values()}
+ def start_callback(self, igbhvr_act_inst, _):
+ self.name_to_category = {
+ obj.name: obj.category for obj in igbhvr_act_inst.simulator.scene.objects_by_name.values()
+ }
+ self.task_obj_info = {obj.name: obj.category for obj in igbhvr_act_inst.object_scope.values()}
- self.gaze_marker = GazeVizMarker(env.simulator, 0.02)
+ self.gaze_marker = GazeVizMarker(igbhvr_act_inst.simulator, 0.02)
- def step_callback(self, env, log_reader):
- s = env.simulator
+ def step_callback(self, igbhvr_act_inst, log_reader):
+ s = igbhvr_act_inst.simulator
eye_data = log_reader.get_vr_data().query("eye_data")
if eye_data[0]:
if self.target_obj in s.scene.objects_by_id:
diff --git a/igibson/metrics/metric_base.py b/igibson/metrics/metric_base.py
index 99038d2fd..14429144b 100644
--- a/igibson/metrics/metric_base.py
+++ b/igibson/metrics/metric_base.py
@@ -4,13 +4,13 @@
class MetricBase(with_metaclass(ABCMeta, object)):
- def start_callback(self, env, log_reader):
+ def start_callback(self, igbhvr_act_inst, log_reader):
pass
- def step_callback(self, env, log_reader):
+ def step_callback(self, igbhvr_act_inst, log_reader):
pass
- def end_callback(self, env, log_reader):
+ def end_callback(self, igbhvr_act_inst, log_reader):
pass
@abstractmethod
diff --git a/igibson/metrics/task.py b/igibson/metrics/task.py
index c4d822a0b..e58cc9c9b 100644
--- a/igibson/metrics/task.py
+++ b/igibson/metrics/task.py
@@ -6,30 +6,31 @@
class TaskMetric(MetricBase):
def __init__(self):
self.satisfied_predicates = []
+ self.q_score = []
self.timesteps = 0
- def start_callback(self, env, _):
- self.render_timestep = env.simulator.render_timestep
+ def start_callback(self, igbhvr_act_instance, _):
+ self.render_timestep = igbhvr_act_instance.simulator.render_timestep
- def step_callback(self, env, _):
+ def step_callback(self, igbhvr_act_inst, _):
self.timesteps += 1
- self.satisfied_predicates.append(env.task.current_goal_status)
-
- def end_callback(self, env, _):
+ self.satisfied_predicates.append(igbhvr_act_inst.current_goal_status)
candidate_q_score = []
- for option in env.task.ground_goal_state_options:
+ for option in igbhvr_act_inst.ground_goal_state_options:
predicate_truth_values = []
for predicate in option:
predicate_truth_values.append(predicate.evaluate())
candidate_q_score.append(np.mean(predicate_truth_values))
- self.final_q_score = np.max(candidate_q_score)
+ self.q_score.append(np.max(candidate_q_score))
def gather_results(self):
return {
"satisfied_predicates": {
"timestep": self.satisfied_predicates,
},
- "q_score": {"final": self.final_q_score},
+ "q_score": {
+ "timestep": self.q_score,
+ },
"time": {
"simulator_steps": self.timesteps,
"simulator_time": self.timesteps * self.render_timestep,
diff --git a/igibson/object_states/__init__.py b/igibson/object_states/__init__.py
index 2feda22ff..95f6b2940 100644
--- a/igibson/object_states/__init__.py
+++ b/igibson/object_states/__init__.py
@@ -14,13 +14,7 @@
from igibson.object_states.on_top import OnTop
from igibson.object_states.open import Open
from igibson.object_states.pose import Pose
-from igibson.object_states.robot_related_states import (
- InFOVOfRobot,
- InHandOfRobot,
- InReachOfRobot,
- InSameRoomAsRobot,
- ObjectsInFOVOfRobot,
-)
+from igibson.object_states.robot_related_states import InFOVOfRobot, InHandOfRobot, InReachOfRobot, InSameRoomAsRobot
from igibson.object_states.room_states import (
ROOM_STATES,
InsideRoomTypes,
diff --git a/igibson/object_states/adjacency.py b/igibson/object_states/adjacency.py
index ba25e98d7..5947fd051 100644
--- a/igibson/object_states/adjacency.py
+++ b/igibson/object_states/adjacency.py
@@ -8,12 +8,12 @@
_MAX_ITERATIONS = 10
_MAX_DISTANCE_VERTICAL = 5.0
-_MAX_DISTANCE_HORIZONTAL = 1.0
+_MAX_DISTANCE_HORIZONTAL = 4.0 #1.0
# How many 2-D bases to try during horizontal adjacency check. When 1, only the standard axes will be considered.
# When 2, standard axes + 45 degree rotated will be considered. The tried axes will be equally spaced. The higher
-# this number, the lower the possibility of false negatives in Inside and NextTo.
-_HORIZONTAL_AXIS_COUNT = 5
+# this number, the lower the possibility of false negatives in Inside.
+_HORIZONTAL_AXIS_COUNT = 3
AxisAdjacencyList = namedtuple("AxisAdjacencyList", ("positive_neighbors", "negative_neighbors"))
@@ -29,7 +29,7 @@ def get_equidistant_coordinate_planes(n_planes):
The samples will cover all 360 degrees (although rotational symmetry
is assumed, e.g. if you take into account the axis index and the
- positive/negative directions, only 1/4 of the possible coordinate (1 quadrant, np.pi / 2.0)
+ positive/negative directions, only 1/4 of the possible coordinate
planes will be sampled: the ones where the first axis' positive direction
is in the first quadrant).
@@ -40,7 +40,7 @@ def get_equidistant_coordinate_planes(n_planes):
corresponding to the axis.
"""
# Compute the positive directions of the 1st axis of each plane.
- first_axis_angles = np.linspace(0, np.pi / 2, n_planes)
+ first_axis_angles = np.linspace(0, np.pi / 4, n_planes)
first_axes = np.stack(
[np.cos(first_axis_angles), np.sin(first_axis_angles), np.zeros_like(first_axis_angles)], axis=1
)
diff --git a/igibson/object_states/dirty.py b/igibson/object_states/dirty.py
index 7ba5f7066..2afcf3dbf 100644
--- a/igibson/object_states/dirty.py
+++ b/igibson/object_states/dirty.py
@@ -1,7 +1,6 @@
from igibson.object_states import AABB
from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState
from igibson.objects.particles import Dust, Stain
-from igibson.utils.constants import SemanticClass
CLEAN_THRESHOLD = 0.5
FLOOR_CLEAN_THRESHOLD = 0.75
@@ -26,7 +25,7 @@ def __init__(self, obj):
self.initial_dump = None
def _initialize(self):
- self.dirt = self.DIRT_CLASS(self.obj, initial_dump=self.initial_dump, class_id=SemanticClass.SCENE_OBJS)
+ self.dirt = self.DIRT_CLASS(self.obj, initial_dump=self.initial_dump)
self.simulator.import_particle_system(self.dirt)
def _get_value(self):
diff --git a/igibson/object_states/factory.py b/igibson/object_states/factory.py
index 1d8b3db67..af2f7ca8c 100644
--- a/igibson/object_states/factory.py
+++ b/igibson/object_states/factory.py
@@ -22,7 +22,6 @@
InsideRoomTypes,
MaxTemperature,
NextTo,
- ObjectsInFOVOfRobot,
OnFloor,
OnTop,
Open,
@@ -50,7 +49,7 @@
"freezable": [Frozen],
"heatSource": [HeatSourceOrSink],
"openable": [Open],
- "robot": ROOM_STATES + [ObjectsInFOVOfRobot],
+ "robot": ROOM_STATES,
"sliceable": [Sliced],
"slicer": [Slicer],
"soakable": [Soaked],
@@ -128,7 +127,7 @@ def get_object_state_instance(state_class, obj, params=None):
return state_class(obj, **params)
-def prepare_object_states(obj, abilities=None):
+def prepare_object_states(obj, abilities=None, online=True):
"""
Prepare the state dictionary for an object by generating the appropriate
object state instances.
@@ -139,6 +138,9 @@ def prepare_object_states(obj, abilities=None):
:param obj: The object to generate states for.
:param abilities: dict in the form of {ability: {param: value}} containing
object abilities and parameters.
+ :param online: Whether or not the states should be generated for an online
+ object. Offline mode involves using dummy objects rather than real state
+ objects.
"""
if abilities is None:
abilities = {}
diff --git a/igibson/object_states/heat_source_or_sink.py b/igibson/object_states/heat_source_or_sink.py
index 6fe7dc973..1bbcb7a1a 100644
--- a/igibson/object_states/heat_source_or_sink.py
+++ b/igibson/object_states/heat_source_or_sink.py
@@ -13,7 +13,6 @@
# The name of the heat source link inside URDF files.
from igibson.objects.visual_marker import VisualMarker
-from igibson.utils.constants import SemanticClass
_HEATING_ELEMENT_LINK_NAME = "heat_source"
@@ -125,13 +124,9 @@ def _initialize(self):
super(HeatSourceOrSink, self)._initialize()
self.initialize_link_mixin()
self.marker = VisualMarker(
- visual_shape=p.GEOM_MESH,
- filename=_HEATING_ELEMENT_MARKER_FILENAME,
- scale=_HEATING_ELEMENT_MARKER_SCALE,
- class_id=SemanticClass.SCENE_OBJS,
- rendering_params={"shadow_caster": True},
+ visual_shape=p.GEOM_MESH, filename=_HEATING_ELEMENT_MARKER_FILENAME, scale=_HEATING_ELEMENT_MARKER_SCALE
)
- self.simulator.import_object(self.marker)
+ self.simulator.import_object(self.marker, use_pbr=False, use_pbr_mapping=False)
self.marker.set_position([0, 0, -100])
def _update(self):
diff --git a/igibson/object_states/inside.py b/igibson/object_states/inside.py
index 39dd09874..ac1ffc39b 100644
--- a/igibson/object_states/inside.py
+++ b/igibson/object_states/inside.py
@@ -10,7 +10,6 @@
from igibson.object_states.object_state_base import BooleanState, RelativeObjectState
from igibson.object_states.pose import Pose
from igibson.object_states.utils import clear_cached_states, sample_kinematics
-from igibson.utils.utils import restoreState
class Inside(PositionalValidationMemoizedObjectStateMixin, KinematicsMixin, RelativeObjectState, BooleanState):
@@ -36,7 +35,7 @@ def _set_value(self, other, new_value, use_ray_casting_method=False):
if sampling_success:
break
else:
- restoreState(state_id)
+ p.restoreState(state_id)
p.removeState(state_id)
diff --git a/igibson/object_states/on_floor.py b/igibson/object_states/on_floor.py
index e4679c12c..c3adad462 100644
--- a/igibson/object_states/on_floor.py
+++ b/igibson/object_states/on_floor.py
@@ -8,7 +8,6 @@
from igibson.object_states.utils import clear_cached_states, get_center_extent, sample_kinematics
# TODO: remove after split floors
-from igibson.utils.utils import restoreState
class RoomFloor(object):
@@ -47,7 +46,7 @@ def _set_value(self, other, new_value):
if sampling_success:
break
else:
- restoreState(state_id)
+ p.restoreState(state_id)
p.removeState(state_id)
diff --git a/igibson/object_states/on_top.py b/igibson/object_states/on_top.py
index 3ae783ae8..3a4a301c5 100644
--- a/igibson/object_states/on_top.py
+++ b/igibson/object_states/on_top.py
@@ -7,7 +7,6 @@
from igibson.object_states.object_state_base import BooleanState, RelativeObjectState
from igibson.object_states.touching import Touching
from igibson.object_states.utils import clear_cached_states, sample_kinematics
-from igibson.utils.utils import restoreState
class OnTop(PositionalValidationMemoizedObjectStateMixin, RelativeObjectState, BooleanState):
@@ -33,7 +32,7 @@ def _set_value(self, other, new_value, use_ray_casting_method=False):
if sampling_success:
break
else:
- restoreState(state_id)
+ p.restoreState(state_id)
p.removeState(state_id)
@@ -50,7 +49,18 @@ def _get_value(self, other, use_ray_casting_method=False):
# Then check vertical adjacency - it's the second least
# costly.
adjacency = self.obj.states[VerticalAdjacency].get_value()
- return (
- other.get_body_id() in adjacency.negative_neighbors
- and other.get_body_id() not in adjacency.positive_neighbors
- )
+
+ # NOTE (njk): This below if-else logic is different than the
+ # original iGibson code. It's necessary because a shelf
+ # has multiple levels and thus, when an object is in one of the
+ # shelves, it is in both the negative_neighbors and
+ # positive_neighbors.
+ if other.category == 'shelf':
+ result = other.get_body_id() in adjacency.negative_neighbors
+ else:
+ result = (
+ other.get_body_id() in adjacency.negative_neighbors
+ and other.get_body_id() not in adjacency.positive_neighbors
+ )
+
+ return result
diff --git a/igibson/object_states/open.py b/igibson/object_states/open.py
index d8be9d175..8db1266e0 100644
--- a/igibson/object_states/open.py
+++ b/igibson/object_states/open.py
@@ -12,51 +12,31 @@
p.JOINT_REVOLUTE: 0.05,
p.JOINT_PRISMATIC: 0.05,
}
-_OPEN_SAMPLING_ATTEMPTS = 5
_METADATA_FIELD = "openable_joint_ids"
-_BOTH_SIDES_METADATA_FIELD = "openable_both_sides"
-def _compute_joint_threshold(joint_info, joint_direction):
+def _compute_joint_threshold(joint_info):
# Convert fractional threshold to actual joint position.
f = _JOINT_THRESHOLD_BY_TYPE[joint_info.jointType]
- closed_end = joint_info.jointLowerLimit if joint_direction == 1 else joint_info.jointUpperLimit
- open_end = joint_info.jointUpperLimit if joint_direction == 1 else joint_info.jointLowerLimit
- threshold = (1 - f) * closed_end + f * open_end
- return threshold, open_end, closed_end
-
-
-def _is_in_range(position, threshold, range_end):
- # Note that we are unable to do an actual range check here because the joint positions can actually go
- # slightly further than the denoted joint limits.
- if range_end > threshold:
- return position > threshold
- else:
- return position < threshold
+ return (1 - f) * joint_info.jointLowerLimit + f * joint_info.jointUpperLimit
def _get_relevant_joints(obj):
if not hasattr(obj, "metadata"):
- return None, None, None
-
- both_sides = obj.metadata[_BOTH_SIDES_METADATA_FIELD] if _BOTH_SIDES_METADATA_FIELD in obj.metadata else False
+ return None
# Get joint IDs and names from metadata annotation. If object doesn't have the openable metadata,
# we stop here and return Open=False.
- if _METADATA_FIELD not in obj.metadata:
+ if _METADATA_FIELD not in obj.metadata or obj.metadata[_METADATA_FIELD] == []:
print("No openable joint metadata found for object %s" % obj.name)
- return None, None, None
+ return None
joint_metadata = obj.metadata[_METADATA_FIELD]
-
- # The joint metadata is in the format of [(joint_id, joint_name), ...] for legacy annotations and
- # [(joint_id, joint_name, joint_direction), ...] for direction-annotated objects.
- joint_names = [m[1] for m in joint_metadata]
- joint_directions = [m[2] if len(m) > 2 else 1 for m in joint_metadata]
+ _, joint_names = tuple(zip(*joint_metadata))
if not joint_names:
print("No openable joint was listed in metadata for object %s" % obj.name)
- return None, None, None
+ return None
joint_names = set(obj.get_prefixed_joint_name(joint_name).encode(encoding="utf-8") for joint_name in joint_names)
# Get joint infos and compute openness thresholds.
@@ -74,99 +54,49 @@ def _get_relevant_joints(obj):
)
assert all(joint_info.jointType in _JOINT_THRESHOLD_BY_TYPE.keys() for joint_info in relevant_joint_infos)
- return both_sides, relevant_joint_infos, joint_directions
+ return relevant_joint_infos
class Open(CachingEnabledObjectState, BooleanState):
def _compute_value(self):
- both_sides, relevant_joint_infos, joint_directions = _get_relevant_joints(self.obj)
+ relevant_joint_infos = _get_relevant_joints(self.obj)
if not relevant_joint_infos:
return False
- # The "sides" variable is used to check open/closed state for objects whose joints can switch
- # positions. These objects are annotated with the both_sides annotation and the idea is that switching
- # the directions of *all* of the joints results in a similarly valid checkable state. As a result, to check
- # each "side", we multiply *all* of the joint directions with the coefficient belonging to that side, which
- # may be 1 or -1.
- sides = [1, -1] if both_sides else [1]
-
- sides_openness = []
- for side in sides:
- # Compute a boolean openness state for each joint by comparing positions to thresholds.
- joint_ids = [joint_info.jointIndex for joint_info in relevant_joint_infos]
- joint_thresholds = (
- _compute_joint_threshold(joint_info, joint_direction * side)
- for joint_info, joint_direction in zip(relevant_joint_infos, joint_directions)
- )
- joint_positions = utils.get_joint_positions(self.obj.get_body_id(), joint_ids)
- joint_openness = (
- _is_in_range(position, threshold, open_end)
- for position, (threshold, open_end, closed_end) in zip(joint_positions, joint_thresholds)
- )
-
- # Looking from this side, the object is open if any of its joints is open.
- sides_openness.append(any(joint_openness))
-
- # The object is open only if it's open from all of its sides.
- return all(sides_openness)
-
- def _set_value(self, new_value, fully=False):
- """
- Set the openness state, either to a random joint position satisfying the new value, or fully open/closed.
-
- @param new_value: bool value for the openness state of the object.
- @param fully: whether the object should be fully opened/closed (e.g. all relevant joints to 0/1).
- @return: bool indicating setter success. Failure may happen due to unannotated objects.
- """
- both_sides, relevant_joint_infos, joint_directions = _get_relevant_joints(self.obj)
+ # Compute a boolean openness state for each joint by comparing positions to thresholds.
+ joint_ids = [joint_info.jointIndex for joint_info in relevant_joint_infos]
+ joint_thresholds = (_compute_joint_threshold(joint_info) for joint_info in relevant_joint_infos)
+ joint_positions = utils.get_joint_positions(self.obj.get_body_id(), joint_ids)
+ joint_openness = (position > threshold for position, threshold in zip(joint_positions, joint_thresholds))
+
+ # Return open if any joint is open, false otherwise.
+ return any(joint_openness)
+
+ def _set_value(self, new_value):
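+ """
+ Set the openness state by sampling joint positions on the open or closed side of each relevant joint's threshold.
+
+ @param new_value: bool value for the openness state of the object.
+ @return: bool indicating setter success. Failure may happen due to unannotated objects.
+ """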
+ relevant_joint_infos = _get_relevant_joints(self.obj)
if not relevant_joint_infos:
return False
- # The "sides" variable is used to check open/closed state for objects whose joints can switch
- # positions. These objects are annotated with the both_sides annotation and the idea is that switching
- # the directions of *all* of the joints results in a similarly valid checkable state. We want our object to be
- # open from *both* of the two sides, and I was too lazy to implement the logic for this without rejection
- # sampling, so that's what we do.
- # TODO: Implement a sampling method that's guaranteed to be correct, ditch the rejection method.
- sides = [1, -1] if both_sides else [1]
-
- for _ in range(_OPEN_SAMPLING_ATTEMPTS):
- side = random.choice(sides)
-
- # All joints are relevant if we are closing, but if we are opening let's sample a subset.
- if new_value and not fully:
- num_to_open = random.randint(1, len(relevant_joint_infos))
- relevant_joint_infos = random.sample(relevant_joint_infos, num_to_open)
-
- # Go through the relevant joints & set random positions.
- for joint_info, joint_direction in zip(relevant_joint_infos, joint_directions):
- threshold, open_end, closed_end = _compute_joint_threshold(joint_info, joint_direction * side)
-
- # Get the range
- if new_value:
- joint_range = (threshold, open_end)
- else:
- joint_range = (threshold, closed_end)
-
- if fully:
- joint_pos = joint_range[1]
- else:
- # Convert the range to the format numpy accepts.
- low = min(joint_range)
- high = max(joint_range)
-
- # Sample a position.
- joint_pos = random.uniform(low, high)
-
- # Save sampled position.
- utils.set_joint_position(self.obj.get_body_id(), joint_info.jointIndex, joint_pos)
-
- # If we succeeded, return now.
- if self._compute_value() == new_value:
- return True
-
- # We exhausted our attempts and could not find a working sample.
- return False
+ # All joints are relevant if we are closing, but if we are opening let's sample a subset.
+ if new_value:
+ num_to_open = random.randint(1, len(relevant_joint_infos))
+ relevant_joint_infos = random.sample(relevant_joint_infos, num_to_open)
+
+ # Go through the relevant joints & set random positions.
+ for joint_info in relevant_joint_infos:
+ joint_threshold = _compute_joint_threshold(joint_info)
+
+ if new_value:
+ # Sample an open position.
+ joint_pos = random.uniform(joint_threshold, joint_info.jointUpperLimit)
+ else:
+ # Sample a closed position.
+ joint_pos = random.uniform(joint_info.jointLowerLimit, joint_threshold)
+
+ # Save sampled position.
+ utils.set_joint_position(self.obj.get_body_id(), joint_info.jointIndex, joint_pos)
+
+ return True
# We don't need to do anything here - since the joints are saved, this should work directly.
def _dump(self):
diff --git a/igibson/object_states/robot_related_states.py b/igibson/object_states/robot_related_states.py
index 69611ae5c..6cfefdaf8 100644
--- a/igibson/object_states/robot_related_states.py
+++ b/igibson/object_states/robot_related_states.py
@@ -33,7 +33,7 @@ def _compute_value(self):
if not robot:
return False
- robot_pos = robot.links["body"].get_position()
+ robot_pos = robot.parts["body"].get_position()
object_pos, _ = self.obj.states[Pose].get_value()
return np.linalg.norm(object_pos - np.array(robot_pos)) < _IN_REACH_DISTANCE_THRESHOLD
@@ -62,7 +62,7 @@ def _compute_value(self):
if not scene or not hasattr(scene, "get_room_instance_by_point"):
return False
- robot_pos = robot.links["body"].get_position()
+ robot_pos = robot.parts["body"].get_position()
robot_room = scene.get_room_instance_by_point(np.array(robot_pos[:2]))
object_rooms = self.obj.states[InsideRoomTypes].get_value()
@@ -99,16 +99,13 @@ def load(self, data):
class InFOVOfRobot(CachingEnabledObjectState, BooleanState):
- @staticmethod
- def get_optional_dependencies():
- return CachingEnabledObjectState.get_optional_dependencies() + [ObjectsInFOVOfRobot]
-
def _compute_value(self):
- robot = _get_behavior_robot(self.simulator)
- if not robot:
- return False
-
- return self.obj.get_body_id() in robot.links["body"].states[ObjectsInFOVOfRobot].get_value()
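+ # Render an instance segmentation frame from the robot's cameras and check whether any visible
+ # instance belongs to this object's main body.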
+ seg = self.simulator.renderer.render_robot_cameras(modes="ins_seg")[0][:, :, 0]
+ seg = np.round(seg * MAX_INSTANCE_COUNT)
+ main_body_instances = [
+ inst.id for inst in self.obj.renderer_instances if inst.pybullet_uuid == self.obj.get_body_id()
+ ]
+ return np.any(np.isin(seg, main_body_instances))
def _set_value(self, new_value):
raise NotImplementedError("InFOVOfRobot state currently does not support setting.")
@@ -119,28 +116,3 @@ def _dump(self):
def load(self, data):
pass
-
-
-class ObjectsInFOVOfRobot(CachingEnabledObjectState):
- def __init__(self, *args, **kwargs):
- super(CachingEnabledObjectState, self).__init__(*args, **kwargs)
-
- def _compute_value(self):
- # Pass the FOV through the instance-to-body ID mapping.
- seg = self.obj.parent.render_camera_image(modes="ins_seg")[0][:, :, 0]
- seg = np.round(seg * MAX_INSTANCE_COUNT).astype(int)
- body_ids = self.simulator.renderer.get_pb_ids_for_instance_ids(seg)
-
- # Pixels that don't contain an object are marked -1 but we don't want to include that
- # as a body ID.
- return set(np.unique(body_ids)) - {-1}
-
- def _set_value(self, new_value):
- raise NotImplementedError("ObjectsInFOVOfRobot state currently does not support setting.")
-
- # Nothing to do here.
- def _dump(self):
- pass
-
- def load(self, data):
- pass
diff --git a/igibson/object_states/soaked.py b/igibson/object_states/soaked.py
index e41e90b2c..ca74bf521 100644
--- a/igibson/object_states/soaked.py
+++ b/igibson/object_states/soaked.py
@@ -24,7 +24,7 @@ def _update(self):
item.bodyUniqueIdB for item in list(self.obj.states[ContactBodies].get_value())
)
for particle in water_source_obj.states[WaterSource].water_stream.get_active_particles():
- if particle.get_body_id() in contacted_water_body_ids:
+ if particle.body_id in contacted_water_body_ids:
self.value = True
self.update_texture()
diff --git a/igibson/object_states/toggle.py b/igibson/object_states/toggle.py
index 892ccae40..3d678f66d 100644
--- a/igibson/object_states/toggle.py
+++ b/igibson/object_states/toggle.py
@@ -1,10 +1,11 @@
+import numpy as np
import pybullet as p
from igibson.object_states.link_based_state_mixin import LinkBasedStateMixin
from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState
from igibson.object_states.texture_change_state_mixin import TextureChangeStateMixin
from igibson.objects.visual_marker import VisualMarker
-from igibson.utils.constants import PyBulletSleepState, SemanticClass, SimulatorMode
+from igibson.utils.constants import PyBulletSleepState
from igibson.utils.utils import brighten_texture
_TOGGLE_DISTANCE_THRESHOLD = 0.1
@@ -34,18 +35,8 @@ def get_state_link_name():
def _initialize(self):
super(ToggledOn, self)._initialize()
if self.initialize_link_mixin():
- self.visual_marker_on = VisualMarker(
- rgba_color=[0, 1, 0, 0.5],
- radius=_TOGGLE_BUTTON_RADIUS,
- class_id=SemanticClass.SCENE_OBJS,
- rendering_params={"use_pbr": True, "use_pbr_mapping": True},
- )
- self.visual_marker_off = VisualMarker(
- rgba_color=[1, 0, 0, 0.5],
- radius=_TOGGLE_BUTTON_RADIUS,
- class_id=SemanticClass.SCENE_OBJS,
- rendering_params={"use_pbr": True, "use_pbr_mapping": True},
- )
+ self.visual_marker_on = VisualMarker(rgba_color=[0, 1, 0, 0.5], radius=_TOGGLE_BUTTON_RADIUS)
+ self.visual_marker_off = VisualMarker(rgba_color=[1, 0, 0, 0.5], radius=_TOGGLE_BUTTON_RADIUS)
self.simulator.import_object(self.visual_marker_on)
self.visual_marker_on.set_position(_TOGGLE_MARKER_OFF_POSITION)
self.simulator.import_object(self.visual_marker_off)
@@ -73,7 +64,7 @@ def _update(self):
# swap two types of markers when toggled
# when hud overlay is on, we show the toggle buttons, otherwise the buttons are hidden
- if self.simulator.mode == SimulatorMode.VR:
+ if self.simulator.can_access_vr_context:
hud_overlay_show_state = self.simulator.get_hud_show_state()
else:
hud_overlay_show_state = False
diff --git a/igibson/object_states/under.py b/igibson/object_states/under.py
index b669e350e..83da0f1ac 100644
--- a/igibson/object_states/under.py
+++ b/igibson/object_states/under.py
@@ -6,7 +6,6 @@
from igibson.object_states.memoization import PositionalValidationMemoizedObjectStateMixin
from igibson.object_states.object_state_base import BooleanState, RelativeObjectState
from igibson.object_states.utils import clear_cached_states, sample_kinematics
-from igibson.utils.utils import restoreState
class Under(PositionalValidationMemoizedObjectStateMixin, RelativeObjectState, BooleanState):
@@ -30,7 +29,7 @@ def _set_value(self, other, new_value):
if sampling_success:
break
else:
- restoreState(state_id)
+ p.restoreState(state_id)
p.removeState(state_id)
diff --git a/igibson/object_states/utils.py b/igibson/object_states/utils.py
index 5bed31f68..fa272305a 100644
--- a/igibson/object_states/utils.py
+++ b/igibson/object_states/utils.py
@@ -1,9 +1,6 @@
-import itertools
-
import cv2
import numpy as np
import pybullet as p
-import trimesh
from IPython import embed
from scipy.spatial.transform import Rotation as R
@@ -18,8 +15,7 @@
)
from igibson.object_states.aabb import AABB
from igibson.object_states.object_state_base import CachingEnabledObjectState
-from igibson.utils import sampling_utils, utils
-from igibson.utils.utils import restoreState
+from igibson.utils import sampling_utils
_ON_TOP_RAY_CASTING_SAMPLING_PARAMS = {
# "hit_to_plane_threshold": 0.1, # TODO: Tune this parameter.
@@ -65,27 +61,8 @@ def detect_collision(bodyA):
return collision
-def detect_collision(bodyA):
- collision = False
- for body_id in range(p.getNumBodies()):
- if body_id == bodyA:
- continue
- closest_points = p.getClosestPoints(bodyA, body_id, distance=0.01)
- if len(closest_points) > 0:
- collision = True
- break
- return collision
-
-
def sample_kinematics(
- predicate,
- objA,
- objB,
- binary_state,
- use_ray_casting_method=False,
- max_trials=100,
- z_offset=0.05,
- skip_falling=False,
+ predicate, objA, objB, binary_state, use_ray_casting_method=False, max_trials=100, z_offset=0.05, skip_falling=False
):
if not binary_state:
raise NotImplementedError()
@@ -127,19 +104,16 @@ def sample_kinematics(
else:
assert False, "predicate is not onTop or inside: {}".format(predicate)
- # Retrieve base CoM frame-aligned bounding box parallel to the XY plane
- parallel_bbox_center, parallel_bbox_orn, parallel_bbox_extents, _ = objA.get_base_aligned_bounding_box(
- xy_aligned=True
- )
+ aabb = get_aabb(objA.get_body_id())
+ aabb_center, aabb_extent = get_aabb_center(aabb), get_aabb_extent(aabb)
# TODO: Get this to work with non-URDFObject objects.
sampling_results = sampling_utils.sample_cuboid_on_object(
objB,
num_samples=1,
- cuboid_dimensions=parallel_bbox_extents,
+ cuboid_dimensions=aabb_extent,
axis_probabilities=[0, 0, 1],
refuse_downwards=True,
- undo_padding=True,
**params
)
@@ -148,22 +122,18 @@ def sample_kinematics(
sampling_success = sampled_vector is not None
if sampling_success:
- # Move the object from the original parallel bbox to the sampled bbox
- parallel_bbox_rotation = R.from_quat(parallel_bbox_orn)
+ # Find the delta from the object's CoM to its AABB centroid
+ diff = old_pos - aabb_center
+
sample_rotation = R.from_quat(sampled_quaternion)
original_rotation = R.from_quat(orientation)
+ combined_rotation = sample_rotation * original_rotation
- # The additional orientation to be applied should be the delta orientation
- # between the parallel bbox orientation and the sample orientation
- additional_rotation = sample_rotation * parallel_bbox_rotation.inv()
- combined_rotation = additional_rotation * original_rotation
- orientation = combined_rotation.as_quat()
+ # Rotate the CoM-to-centroid delta by the sampled orientation
+ rotated_diff = sample_rotation.apply(diff)
- # The delta vector between the base CoM frame and the parallel bbox center needs to be rotated
- # by the same additional orientation
- diff = old_pos - parallel_bbox_center
- rotated_diff = additional_rotation.apply(diff)
pos = sampled_vector + rotated_diff
+ orientation = combined_rotation.as_quat()
else:
random_idx = np.random.randint(len(objB.supporting_surfaces[predicate].keys()))
body_id, link_id = list(objB.supporting_surfaces[predicate].keys())[random_idx]
@@ -210,14 +180,14 @@ def sample_kinematics(
objA.set_position_orientation(pos, orientation)
success = not detect_collision(objA.get_body_id()) # len(p.getContactPoints(objA.get_body_id())) == 0
- if igibson.debug_sampling:
+ if igibson.debug_sampling:
print("sample_kinematics", success)
embed()
if success:
break
else:
- restoreState(state_id)
+ p.restoreState(state_id)
p.removeState(state_id)
@@ -231,3 +201,162 @@ def sample_kinematics(
break
return success
+
+def continuous_param_kinematics(
+ predicate, objA, objB, binary_state, continuous_params, use_ray_casting_method=False, max_trials=100, z_offset=0.05, skip_falling=False
+):
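+ """
+ Variant of sample_kinematics that uses caller-provided continuous parameters (target position and
+ orientation) for the placement instead of sampling one where possible.
+ """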
+ if not binary_state:
+ raise NotImplementedError()
+
+ sample_on_floor = predicate == "onFloor"
+
+ if not use_ray_casting_method and not sample_on_floor and predicate not in objB.supporting_surfaces:
+ return False
+
+ objA.force_wakeup()
+ if not sample_on_floor:
+ objB.force_wakeup()
+
+ state_id = p.saveState()
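+ # The saved pybullet state lets a failed placement attempt be rolled back below.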
+ for i in range(max_trials):
+ pos = None
+ if hasattr(objA, "orientations") and objA.orientations is not None:
+ orientation = objA.sample_orientation()
+ else:
+ orientation = [0, 0, 0, 1]
+
+ # Orientation needs to be set for stable_z_on_aabb to work correctly
+ # Position needs to be set to be very far away because the object's
+ # original position might be blocking rays (use_ray_casting_method=True)
+ old_pos = np.array([200, 200, 200])
+ objA.set_position_orientation(old_pos, orientation)
+
+ # TODO: incorporate continuous_params here
+
+ if sample_on_floor:
+ # TODO: Warning - onFloor placement does not check that the provided position is valid (e.g. lies within the room instance)
+ #_, pos = objB.scene.get_random_point_by_room_instance(objB.room_instance)
+ pos = continuous_params
+
+ if pos is not None:
+ pos[2] = stable_z_on_aabb(objA.get_body_id(), ([0, 0, pos[2]], [0, 0, pos[2]]))
+ else:
+ if use_ray_casting_method:
+ if predicate == "onTop":
+ params = _ON_TOP_RAY_CASTING_SAMPLING_PARAMS
+ elif predicate == "inside":
+ params = _INSIDE_RAY_CASTING_SAMPLING_PARAMS
+ else:
+ assert False, "predicate is not onTop or inside: {}".format(predicate)
+
+ aabb = get_aabb(objA.get_body_id())
+ aabb_center, aabb_extent = get_aabb_center(aabb), get_aabb_extent(aabb)
+
+ # TODO: Get this to work with non-URDFObject objects.
+ # sampling_results = sampling_utils.sample_cuboid_on_object(
+ # objB,
+ # num_samples=1,
+ # cuboid_dimensions=aabb_extent,
+ # axis_probabilities=[0, 0, 1],
+ # refuse_downwards=True,
+ # **params
+ # )
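+ # Build the sampling result directly from the caller-provided parameters:
+ # continuous_params[0:3] is the target position, continuous_params[3:7] the target orientation quaternion.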
+ sampling_results = [[continuous_params[0:3], None, continuous_params[3:7]]]
+
+ sampling_success = sampling_utils.check_sample_cuboid_on_object(
+ sampling_results,
+ objB,
+ num_samples=1,
+ cuboid_dimensions=aabb_extent,
+ axis_probabilities=[0, 0, 1],
+ refuse_downwards=True,
+ **params
+ )
+
+ sampled_vector = sampling_results[0][0]
+ sampled_quaternion = sampling_results[0][2]
+
+ #sampling_success = sampled_vector is not None
+ if sampling_success:
+ # Find the delta from the object's CoM to its AABB centroid
+ diff = old_pos - aabb_center
+
+ sample_rotation = R.from_quat(sampled_quaternion)
+ original_rotation = R.from_quat(orientation)
+ combined_rotation = sample_rotation * original_rotation
+
+ # Rotate the CoM-to-centroid delta by the sampled orientation
+ rotated_diff = sample_rotation.apply(diff)
+
+ pos = sampled_vector + rotated_diff
+ orientation = combined_rotation.as_quat()
+ else:
+ random_idx = np.random.randint(len(objB.supporting_surfaces[predicate].keys()))
+ body_id, link_id = list(objB.supporting_surfaces[predicate].keys())[random_idx]
+ random_height_idx = np.random.randint(len(objB.supporting_surfaces[predicate][(body_id, link_id)]))
+ height, height_map = objB.supporting_surfaces[predicate][(body_id, link_id)][random_height_idx]
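+ # Erode the support-surface height map by the object's half extent so the sampled point leaves room for the whole object.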
+ obj_half_size = np.max(objA.bounding_box) / 2 * 100
+ obj_half_size_scaled = np.array([obj_half_size / objB.scale[1], obj_half_size / objB.scale[0]])
+ obj_half_size_scaled = np.ceil(obj_half_size_scaled).astype(np.int)
+ height_map_eroded = cv2.erode(height_map, np.ones(obj_half_size_scaled, np.uint8))
+
+ valid_pos = np.array(height_map_eroded.nonzero())
+ if valid_pos.shape[1] != 0:
+ random_pos_idx = np.random.randint(valid_pos.shape[1])
+ random_pos = valid_pos[:, random_pos_idx]
+ y_map, x_map = random_pos
+ y = y_map / 100.0 - 2
+ x = x_map / 100.0 - 2
+ z = height
+
+ pos = np.array([x, y, z])
+ pos *= objB.scale
+
+ # the supporting surface is defined w.r.t to the link frame, not
+ # the inertial frame
+ if link_id == -1:
+ link_pos, link_orn = p.getBasePositionAndOrientation(body_id)
+ dynamics_info = p.getDynamicsInfo(body_id, -1)
+ inertial_pos = dynamics_info[3]
+ inertial_orn = dynamics_info[4]
+ inv_inertial_pos, inv_inertial_orn = p.invertTransform(inertial_pos, inertial_orn)
+ link_pos, link_orn = p.multiplyTransforms(
+ link_pos, link_orn, inv_inertial_pos, inv_inertial_orn
+ )
+ else:
+ link_pos, link_orn = get_link_pose(body_id, link_id)
+ pos = matrix_from_quat(link_orn).dot(pos) + np.array(link_pos)
+ z = stable_z_on_aabb(objA.get_body_id(), ([0, 0, pos[2]], [0, 0, pos[2]]))
+ pos[2] = z
+
+ if pos is None:
+ success = False
+ else:
+ pos[2] += z_offset
+ objA.set_position_orientation(pos, orientation)
+ # TODO (wmcclinton): replace the call above with plan_hand_motion_br from igibson/utils/behavior_robot_planning_utils.py.
+ # May need the transform of the object relative to the hand to compute the hand pose after the object is placed.
+ success = not detect_collision(objA.get_body_id()) # len(p.getContactPoints(objA.get_body_id())) == 0
+
+ if igibson.debug_sampling:
+ print("sample_kinematics", success)
+ embed()
+
+ if success:
+ break
+ else:
+ p.restoreState(state_id)
+
+ if success and not skip_falling:
+ objA.set_position_orientation(pos, orientation)
+ # Let it fall for 0.2 second
+ physics_timestep = p.getPhysicsEngineParameters()["fixedTimeStep"]
+ for _ in range(int(0.2 / physics_timestep)):
+ p.stepSimulation()
+ if len(p.getContactPoints(bodyA=objA.get_body_id())) > 0:
+ break
+
+ p.restoreState(state_id)
+ p.removeState(state_id)
+
+ return success
\ No newline at end of file
diff --git a/igibson/object_states/water_source.py b/igibson/object_states/water_source.py
index 2efd8eebf..6332ca244 100644
--- a/igibson/object_states/water_source.py
+++ b/igibson/object_states/water_source.py
@@ -5,7 +5,6 @@
from igibson.object_states.object_state_base import AbsoluteObjectState
from igibson.object_states.toggle import ToggledOn
from igibson.objects.particles import WaterStream
-from igibson.utils.constants import SemanticClass
_WATER_SOURCE_LINK_NAME = "water_source"
@@ -42,9 +41,7 @@ def _initialize(self):
return
water_source_position = list(np.array(water_source_position) + _OFFSET_FROM_LINK)
- self.water_stream = WaterStream(
- water_source_position, num=_NUM_DROPS, initial_dump=self.initial_dump, class_id=SemanticClass.SCENE_OBJS
- )
+ self.water_stream = WaterStream(water_source_position, num=_NUM_DROPS, initial_dump=self.initial_dump)
self.simulator.import_particle_system(self.water_stream)
del self.initial_dump
@@ -65,7 +62,7 @@ def _update(self):
# water reusing logic
contacted_water_body_ids = set(item.bodyUniqueIdB for item in list(self.obj.states[ContactBodies].get_value()))
for particle in self.water_stream.get_active_particles():
- if particle.get_body_id() in contacted_water_body_ids:
+ if particle.body_id in contacted_water_body_ids:
self.water_stream.stash_particle(particle)
def _set_value(self, new_value):
diff --git a/igibson/objects/articulated_object.py b/igibson/objects/articulated_object.py
index 3b84c58b2..dc9937bce 100644
--- a/igibson/objects/articulated_object.py
+++ b/igibson/objects/articulated_object.py
@@ -1,4 +1,3 @@
-import itertools
import json
import logging
import math
@@ -9,32 +8,24 @@
import xml.etree.ElementTree as ET
import cv2
-import networkx as nx
import numpy as np
import pybullet as p
import trimesh
-from scipy.spatial.transform import Rotation
import igibson
from igibson.external.pybullet_tools.utils import (
- get_all_links,
- get_center_extent,
get_joint_info,
get_joints,
- get_link_name,
- get_link_state,
link_from_name,
matrix_from_quat,
quat_from_matrix,
set_joint_position,
)
+from igibson.object_states.factory import prepare_object_states
from igibson.object_states.texture_change_state_mixin import TextureChangeStateMixin
-from igibson.objects.object_base import NonRobotObject, SingleBodyObject
+from igibson.object_states.utils import clear_cached_states
from igibson.objects.stateful_object import StatefulObject
from igibson.render.mesh_renderer.materials import ProceduralMaterial, RandomizedMaterial
-from igibson.utils import utils
-from igibson.utils.constants import SemanticClass
-from igibson.utils.semantics_utils import CLASS_NAME_TO_CLASS_ID
from igibson.utils.urdf_utils import add_fixed_link, get_base_link_name, round_up, save_urdfs_without_floating_joints
from igibson.utils.utils import get_transform_from_xyz_rpy, quatXYZWFromRotMat, rotate_vector_3d
@@ -48,19 +39,19 @@
OBJECT_TAXONOMY = None
-class ArticulatedObject(StatefulObject, SingleBodyObject):
+class ArticulatedObject(StatefulObject):
"""
Articulated objects are defined in URDF files.
They are passive (no motors).
"""
- def __init__(self, filename, scale=1, merge_fixed_links=True, **kwargs):
- super(ArticulatedObject, self).__init__(**kwargs)
+ def __init__(self, filename, scale=1, merge_fixed_links=True):
+ super(ArticulatedObject, self).__init__()
self.filename = filename
self.scale = scale
self.merge_fixed_links = merge_fixed_links
- def _load(self, simulator):
+ def _load(self):
"""
Load the object into pybullet
"""
@@ -71,18 +62,39 @@ def _load(self, simulator):
body_id = p.loadURDF(self.filename, globalScaling=self.scale, flags=flags)
self.mass = p.getDynamicsInfo(body_id, -1)[0]
+ self.body_id = body_id
+ self.create_link_name_to_vm_map(body_id)
+ return body_id
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- return [body_id]
+ def create_link_name_to_vm_map(self, body_id):
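+ """Build a mapping from link names to visual mesh filenames using pybullet's visual shape data."""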
+ self.link_name_to_vm = []
+ link_name_to_vm_urdf = {}
+ for visual_shape in p.getVisualShapeData(body_id):
+ id, link_id, type, dimensions, filename, rel_pos, rel_orn, color = visual_shape[:8]
+ try:
+ if link_id == -1:
+ link_name = p.getBodyInfo(id)[0].decode("utf-8")
+ else:
+ link_name = p.getJointInfo(id, link_id)[12].decode("utf-8")
+ if link_name not in link_name_to_vm_urdf:
+ link_name_to_vm_urdf[link_name] = []
+ else:
+ raise ValueError("Duplicate link name: %s" % link_name)
+ link_name_to_vm_urdf[link_name].append(filename.decode("utf-8"))
+ except:
+ pass
+ self.link_name_to_vm = [link_name_to_vm_urdf]
def force_wakeup(self):
"""
Force wakeup sleeping objects
"""
- for joint_id in range(p.getNumJoints(self.get_body_id())):
- p.changeDynamics(self.get_body_id(), joint_id, activationState=p.ACTIVATION_STATE_WAKE_UP)
- p.changeDynamics(self.get_body_id(), -1, activationState=p.ACTIVATION_STATE_WAKE_UP)
+ for joint_id in range(p.getNumJoints(self.body_id)):
+ p.changeDynamics(self.body_id, joint_id, activationState=p.ACTIVATION_STATE_WAKE_UP)
+ p.changeDynamics(self.body_id, -1, activationState=p.ACTIVATION_STATE_WAKE_UP)
+
+ def get_body_id(self):
+ return self.body_id
class RBOObject(ArticulatedObject):
@@ -96,7 +108,7 @@ def __init__(self, name, scale=1):
super(RBOObject, self).__init__(filename, scale)
-class URDFObject(StatefulObject, NonRobotObject):
+class URDFObject(StatefulObject):
"""
URDFObjects are instantiated from a URDF file. They can be composed of one
or more links and joints. They should be passive. We use this class to
@@ -127,9 +139,6 @@ def __init__(
joint_positions=None,
merge_fixed_links=True,
ignore_visual_shape=False,
- class_id=None,
- rendering_params=None,
- **kwargs
):
"""
:param filename: urdf file path of that object model
@@ -152,22 +161,9 @@ def __init__(
:param visualize_primitives: whether to render geometric primitives
:param joint_positions: Joint positions, keyed by body index and joint name, in the form of
List[Dict[name, position]]
- :param class_id: Class ID to override default class ID with (default will be constructed using category)
- :param rendering_params: Rendering params to override default category-based rendering params.
"""
- # Load abilities from taxonomy if needed & possible
- if abilities is None:
- if OBJECT_TAXONOMY is not None:
- taxonomy_class = OBJECT_TAXONOMY.get_class_name_from_igibson_category(category)
- if taxonomy_class is not None:
- abilities = OBJECT_TAXONOMY.get_abilities(taxonomy_class)
- else:
- abilities = {}
- else:
- abilities = {}
- assert isinstance(abilities, dict), "Object abilities must be in dictionary form."
+ super(URDFObject, self).__init__()
- # Save important arguments.
self.name = name
self.category = category
self.in_rooms = in_rooms
@@ -183,20 +179,19 @@ def __init__(
self.room_floor = None
self.ignore_visual_shape = ignore_visual_shape
- # Update rendering and class parameters.
- final_rendering_params = {}
- if self.category in ["walls", "floors", "ceilings"]:
- final_rendering_params["use_pbr"] = False
- final_rendering_params["use_pbr_mapping"] = False
- if self.category == "ceilings":
- final_rendering_params["shadow_caster"] = False
- if rendering_params:
- final_rendering_params.update(rendering_params)
- if class_id is None:
- class_id = CLASS_NAME_TO_CLASS_ID.get(self.category, SemanticClass.SCENE_OBJS)
- super(URDFObject, self).__init__(
- abilities=abilities, class_id=class_id, rendering_params=final_rendering_params
- )
+ # Load abilities from taxonomy if needed & possible
+ if abilities is None:
+ if OBJECT_TAXONOMY is not None:
+ taxonomy_class = OBJECT_TAXONOMY.get_class_name_from_igibson_category(self.category)
+ if taxonomy_class is not None:
+ abilities = OBJECT_TAXONOMY.get_abilities(taxonomy_class)
+ else:
+ abilities = {}
+ else:
+ abilities = {}
+
+ assert isinstance(abilities, dict), "Object abilities must be in dictionary form."
+ self.abilities = abilities
# Friction for all prismatic and revolute joints
if joint_friction is not None:
@@ -221,7 +216,7 @@ def __init__(
self.is_fixed = []
self.main_body = -1
- logging.info("Category " + self.category)
+ # logging.info("Category " + self.category)
self.filename = filename
dirname = os.path.dirname(filename)
urdf = os.path.basename(filename)
@@ -230,7 +225,7 @@ def __init__(
if os.path.exists(simplified_urdf):
self.filename = simplified_urdf
filename = simplified_urdf
- logging.info("Loading the following URDF template " + filename)
+ # logging.info("Loading the following URDF template " + filename)
self.object_tree = ET.parse(filename) # Parse the URDF
if not visualize_primitives:
@@ -310,7 +305,7 @@ def __init__(
if self.bounding_box is None:
self.overwrite_inertial = False
- logging.info("Scale: " + np.array2string(scale))
+ # logging.info("Scale: " + np.array2string(scale))
# We need to know where the base_link origin is wrt. the bounding box
# center. That allows us to place the model correctly since the joint
@@ -320,7 +315,6 @@ def __init__(
self.avg_obj_dims = avg_obj_dims
- self.base_link_name = get_base_link_name(self.object_tree)
self.rename_urdf()
self.meta_links = {}
@@ -329,8 +323,8 @@ def __init__(
self.scale_object()
self.compute_object_pose()
self.remove_floating_joints(self.scene_instance_folder)
- self.prepare_link_based_bounding_boxes()
+ prepare_object_states(self, abilities, online=True)
self.prepare_visual_mesh_to_material()
def set_ignore_visual_shape(self, value):
@@ -460,40 +454,49 @@ def sample_orientation(self):
def get_prefixed_joint_name(self, name):
return self.name + "_" + name
- def get_prefixed_link_name(self, name):
- if name == "world":
- return name
- elif name == self.base_link_name:
- # The base_link get renamed as the link tag indicates
- # Just change the name of the base link in the embedded urdf
- return self.name
- else:
- # The other links get also renamed to add the name of the link tag as prefix
- # This allows us to load several instances of the same object
- return self.name + "_" + name
-
def rename_urdf(self):
"""
Helper function that renames the file paths in the object urdf
from relative paths to absolute paths
"""
+ base_link_name = get_base_link_name(self.object_tree)
# Change the links of the added object to adapt to the given name
for link_emb in self.object_tree.iter("link"):
- link_emb.attrib["name"] = self.get_prefixed_link_name(link_emb.attrib["name"])
+ # If the original urdf already contains world link, do not rename
+ if link_emb.attrib["name"] == "world":
+ pass
+ elif link_emb.attrib["name"] == base_link_name:
+ # The base_link gets renamed as the link tag indicates
+ # Just change the name of the base link in the embedded urdf
+ link_emb.attrib["name"] = self.name
+ else:
+ # The other links are also renamed, adding the name of the link tag as a prefix
+ # This allows us to load several instances of the same object
+ link_emb.attrib["name"] = self.name + "_" + link_emb.attrib["name"]
# Change the joints of the added object to adapt them to the given name
for joint_emb in self.object_tree.iter("joint"):
# We change the joint name
joint_emb.attrib["name"] = self.get_prefixed_joint_name(joint_emb.attrib["name"])
-
# We change the child link names
for child_emb in joint_emb.findall("child"):
- child_emb.attrib["link"] = self.get_prefixed_link_name(child_emb.attrib["link"])
-
+ # If the original urdf already contains world link, do not rename
+ if child_emb.attrib["link"] == "world":
+ pass
+ elif child_emb.attrib["link"] == base_link_name:
+ child_emb.attrib["link"] = self.name
+ else:
+ child_emb.attrib["link"] = self.name + "_" + child_emb.attrib["link"]
# and the parent link names
for parent_emb in joint_emb.findall("parent"):
- parent_emb.attrib["link"] = self.get_prefixed_link_name(parent_emb.attrib["link"])
+ # If the original urdf already contains world link, do not rename
+ if parent_emb.attrib["link"] == "world":
+ pass
+ elif parent_emb.attrib["link"] == base_link_name:
+ parent_emb.attrib["link"] = self.name
+ else:
+ parent_emb.attrib["link"] = self.name + "_" + parent_emb.attrib["link"]
def scale_object(self):
"""
@@ -506,16 +509,16 @@ def scale_object(self):
# First, define the scale in each link reference frame
# and apply it to the joint values
- base_link_name = self.get_prefixed_link_name(self.base_link_name)
- self.scales_in_link_frame = {base_link_name: self.scale}
+ base_link_name = get_base_link_name(self.object_tree)
+ scales_in_lf = {base_link_name: self.scale}
all_processed = False
while not all_processed:
all_processed = True
for joint in self.object_tree.iter("joint"):
parent_link_name = joint.find("parent").attrib["link"]
child_link_name = joint.find("child").attrib["link"]
- if parent_link_name in self.scales_in_link_frame and child_link_name not in self.scales_in_link_frame:
- scale_in_parent_lf = self.scales_in_link_frame[parent_link_name]
+ if parent_link_name in scales_in_lf and child_link_name not in scales_in_lf:
+ scale_in_parent_lf = scales_in_lf[parent_link_name]
# The location of the joint frame is scaled using the scale in the parent frame
for origin in joint.iter("origin"):
current_origin_xyz = np.array([float(val) for val in origin.attrib["xyz"].split(" ")])
@@ -548,7 +551,7 @@ def scale_object(self):
# print("Adding: ", joint.find("child").attrib["link"])
- self.scales_in_link_frame[joint.find("child").attrib["link"]] = scale_in_child_lf
+ scales_in_lf[joint.find("child").attrib["link"]] = scale_in_child_lf
# The axis of the joint is defined in the joint frame, we scale it after applying the rotation
for axis in joint.iter("axis"):
@@ -682,7 +685,7 @@ def scale_object(self):
inertia.attrib["iyz"] = str(0.0)
inertia.attrib["izz"] = str(0.0)
- scale_in_lf = self.scales_in_link_frame[link.attrib["name"]]
+ scale_in_lf = scales_in_lf[link.attrib["name"]]
# Apply the scale to all mesh elements within the link (original scale and origin)
for mesh in link.iter("mesh"):
if "scale" in mesh.attrib:
@@ -783,7 +786,7 @@ def create_link_name_vm_mapping(self):
link_name_to_vm_urdf = {}
sub_urdf_tree = ET.parse(self.urdf_paths[i])
- links = sub_urdf_tree.findall("link")
+ links = sub_urdf_tree.findall(".//link")
for link in links:
name = link.attrib["name"]
if name in link_name_to_vm_urdf:
@@ -791,27 +794,6 @@ def create_link_name_vm_mapping(self):
link_name_to_vm_urdf[name] = []
for visual_mesh in link.findall("visual/geometry/mesh"):
link_name_to_vm_urdf[name].append(visual_mesh.attrib["filename"])
-
- if self.merge_fixed_links:
- # Add visual meshes of the child link to the parent link for fixed joints because they will be merged
- # by pybullet after loading
- vms_before_merging = set([item for _, vms in link_name_to_vm_urdf.items() for item in vms])
- directed_graph = nx.DiGraph()
- child_to_parent = dict()
- for joint in sub_urdf_tree.findall("joint"):
- if joint.attrib["type"] == "fixed":
- child_link_name = joint.find("child").attrib["link"]
- parent_link_name = joint.find("parent").attrib["link"]
- directed_graph.add_edge(child_link_name, parent_link_name)
- child_to_parent[child_link_name] = parent_link_name
- for child_link_name in list(nx.algorithms.topological_sort(directed_graph)):
- if child_link_name in child_to_parent:
- parent_link_name = child_to_parent[child_link_name]
- link_name_to_vm_urdf[parent_link_name].extend(link_name_to_vm_urdf[child_link_name])
- del link_name_to_vm_urdf[child_link_name]
- vms_after_merging = set([item for _, vms in link_name_to_vm_urdf.items() for item in vms])
- assert vms_before_merging == vms_after_merging
-
self.link_name_to_vm.append(link_name_to_vm_urdf)
def randomize_texture(self):
@@ -922,7 +904,7 @@ def prepare_procedural_texture(self):
self.procedural_material = procedural_material
- def _load(self, simulator):
+ def _load(self):
"""
Load the object into pybullet and set it to the correct pose
"""
@@ -934,14 +916,14 @@ def _load(self, simulator):
flags |= p.URDF_IGNORE_VISUAL_SHAPES
for idx in range(len(self.urdf_paths)):
- logging.info("Loading " + self.urdf_paths[idx])
+ # logging.info("Loading " + self.urdf_paths[idx])
is_fixed = self.is_fixed[idx]
body_id = p.loadURDF(self.urdf_paths[idx], flags=flags, useFixedBase=is_fixed)
# flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL)
transformation = self.poses[idx]
pos = transformation[0:3, 3]
orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3]))
- logging.info("Moving URDF to (pos,ori): " + np.array_str(pos) + ", " + np.array_str(orn))
+ # logging.info("Moving URDF to (pos,ori): " + np.array_str(pos) + ", " + np.array_str(orn))
dynamics_info = p.getDynamicsInfo(body_id, -1)
inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)
@@ -962,15 +944,6 @@ def _load(self, simulator):
joint_position = self.joint_positions[idx][joint_name]
set_joint_position(body_id, j, joint_position)
- simulator.load_object_in_renderer(
- self,
- body_id,
- self.class_id,
- visual_mesh_to_material=self.visual_mesh_to_material[idx],
- link_name_to_vm=self.link_name_to_vm[idx],
- **self._rendering_params
- )
-
self.body_ids.append(body_id)
self.load_supporting_surfaces()
@@ -995,7 +968,7 @@ def reset(self):
transformation = self.poses[idx]
pos = transformation[0:3, 3]
orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3]))
- logging.info("Resetting URDF to (pos,ori): " + np.array_str(pos) + ", " + np.array_str(orn))
+ # logging.info("Resetting URDF to (pos,ori): " + np.array_str(pos) + ", " + np.array_str(orn))
dynamics_info = p.getDynamicsInfo(body_id, -1)
inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)
@@ -1011,19 +984,108 @@ def reset(self):
body_id, j, p.VELOCITY_CONTROL, targetVelocity=0.0, force=self.joint_friction
)
+ def get_position(self):
+ """
+ Get object position
+
+ :return: position in xyz
+ """
+ body_id = self.get_body_id()
+ pos, _ = p.getBasePositionAndOrientation(body_id)
+ return pos
+
+ def get_orientation(self):
+ """
+ Get object orientation
+
+ :return: quaternion in xyzw
+ """
+ body_id = self.get_body_id()
+ _, orn = p.getBasePositionAndOrientation(body_id)
+ return orn
+
+ def get_position_orientation(self):
+ """
+ Get object position and orientation
+
+ :return: position in xyz
+ :return: quaternion in xyzw
+ """
+ body_id = self.get_body_id()
+ pos, orn = p.getBasePositionAndOrientation(body_id)
+ return pos, orn
+
+ def get_base_link_position_orientation(self):
+ """
+ Get object base link position and orientation
+
+ :return: position in xyz
+ :return: quaternion in xyzw
+ """
+ # TODO: not used anywhere yet, but probably should be put in ObjectBase
+ body_id = self.get_body_id()
+ pos, orn = p.getBasePositionAndOrientation(body_id)
+ dynamics_info = p.getDynamicsInfo(body_id, -1)
+ inertial_pos = dynamics_info[3]
+ inertial_orn = dynamics_info[4]
+ inv_inertial_pos, inv_inertial_orn = p.invertTransform(inertial_pos, inertial_orn)
+ pos, orn = p.multiplyTransforms(pos, orn, inv_inertial_pos, inv_inertial_orn)
+ return pos, orn
+
+ def set_position(self, pos):
+ """
+ Set object position
+
+ :param pos: position in xyz
+ """
+ body_id = self.get_body_id()
+ if self.main_body_is_fixed:
+ logging.warning("cannot set_position for fixed objects")
+ return
+
+ _, old_orn = p.getBasePositionAndOrientation(body_id)
+ p.resetBasePositionAndOrientation(body_id, pos, old_orn)
+ clear_cached_states(self)
+
+ def set_orientation(self, orn):
+ """
+ Set object orientation
+
+ :param orn: quaternion in xyzw
+ """
+ body_id = self.get_body_id()
+ if self.main_body_is_fixed:
+ logging.warning("cannot set_orientation for fixed objects")
+ return
+
+ old_pos, _ = p.getBasePositionAndOrientation(body_id)
+ p.resetBasePositionAndOrientation(body_id, old_pos, orn)
+ clear_cached_states(self)
+
def set_position_orientation(self, pos, orn):
+ """
+ Set object position and orientation
+ :param pos: position in xyz
+ :param orn: quaternion in xyzw
+ """
+ body_id = self.get_body_id()
if self.main_body_is_fixed:
- logging.warning("cannot set position / orientation for fixed objects")
+ logging.warning("cannot set_position_orientation for fixed objects")
return
- super(URDFObject, self).set_position_orientation(pos, orn)
+ p.resetBasePositionAndOrientation(body_id, pos, orn)
+ clear_cached_states(self)
def set_base_link_position_orientation(self, pos, orn):
+ body_id = self.get_body_id()
if self.main_body_is_fixed:
logging.warning("cannot set_base_link_position_orientation for fixed objects")
return
-
- super(URDFObject, self).set_base_link_position_orientation(pos, orn)
+ dynamics_info = p.getDynamicsInfo(body_id, -1)
+ inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
+ pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)
+ self.set_position_orientation(pos, orn)
+ clear_cached_states(self)
def get_body_id(self):
return self.body_ids[self.main_body]
@@ -1050,147 +1112,3 @@ def add_meta_links(self, meta_links):
def set_room_floor(self, room_floor):
assert self.category == "floors"
self.room_floor = room_floor
-
- def prepare_link_based_bounding_boxes(self):
- """This function simply converts the link bounding box metadata to match prefixed link names."""
- self.unscaled_link_bounding_boxes = {}
- if "link_bounding_boxes" in self.metadata:
- for name, bb_data in self.metadata["link_bounding_boxes"].items():
- converted_name = self.get_prefixed_link_name(name)
- self.unscaled_link_bounding_boxes[converted_name] = bb_data
-
- def get_base_aligned_bounding_box(
- self, body_id=None, link_id=None, visual=False, xy_aligned=False, fallback_to_aabb=False
- ):
- """Get a bounding box for this object that's axis-aligned in the object's base frame."""
- if body_id is None:
- body_id = self.get_body_id()
-
- bbox_type = "visual" if visual else "collision"
-
- # Get the base position transform.
- pos, orn = p.getBasePositionAndOrientation(body_id)
- base_com_to_world = utils.quat_pos_to_mat(pos, orn)
-
- # Compute the world-to-base frame transform.
- world_to_base_com = trimesh.transformations.inverse_matrix(base_com_to_world)
-
- # Grab the corners of all the different links' bounding boxes. We will later fit a bounding box to
- # this set of points to get our final, base-frame bounding box.
- points = []
-
- links = [link_id] if link_id is not None else get_all_links(body_id)
- for link in links:
- link_name = get_link_name(body_id, link)
- # If the link has a bounding box annotation.
- if link_name in self.unscaled_link_bounding_boxes:
- # If a visual bounding box does not exist in the dictionary, try switching to collision.
- # We expect that every link has its collision bb annotated (or set to None if none exists).
- if bbox_type == "visual" and "visual" not in self.unscaled_link_bounding_boxes[link_name]:
- logging.debug(
- "Falling back to collision bbox for object %s link %s since no visual bbox exists.",
- self.name,
- link_name,
- )
- bbox_type = "collision"
-
- # Check if the annotation is still missing.
- if bbox_type not in self.unscaled_link_bounding_boxes[link_name]:
- raise ValueError(
- "Could not find %s bounding box for object %s link %s" % (bbox_type, self.name, link_name)
- )
-
- # Check if a mesh exists for this link. If None, the link is meshless, so we continue to the next link.
- if self.unscaled_link_bounding_boxes[link_name][bbox_type] is None:
- continue
-
- # Get the extent and transform.
- bb_data = self.unscaled_link_bounding_boxes[link_name][bbox_type]["oriented"]
- extent_in_bbox_frame = np.array(bb_data["extent"])
- bbox_to_link_origin = np.array(bb_data["transform"])
-
- # Get the link's pose in the base frame.
- if link == -1:
- link_com_to_base_com = np.eye(4)
- else:
- link_state = get_link_state(body_id, link, velocity=False)
- link_com_to_world = utils.quat_pos_to_mat(
- link_state.linkWorldPosition, link_state.linkWorldOrientation
- )
- link_com_to_base_com = np.dot(world_to_base_com, link_com_to_world)
-
- # Scale the bounding box in link origin frame. Here we create a transform that first puts the bounding
- # box's vertices into the link frame, and then scales them to match the scale applied to this object.
- # Note that once scaled, the vertices of the bounding box do not necessarily form a cuboid anymore but
- # instead a parallelepiped. This is not a problem because we later fit a bounding box to the points,
- # this time in the object's base link frame.
- scale_in_link_frame = np.diag(np.concatenate([self.scales_in_link_frame[link_name], [1]]))
- bbox_to_scaled_link_origin = np.dot(scale_in_link_frame, bbox_to_link_origin)
-
- # Account for the link vs. center-of-mass.
- dynamics_info = p.getDynamicsInfo(body_id, link)
- inertial_pos, inertial_orn = p.invertTransform(dynamics_info[3], dynamics_info[4])
- link_origin_to_link_com = utils.quat_pos_to_mat(inertial_pos, inertial_orn)
-
- # Compute the bounding box vertices in the base frame.
- bbox_to_link_com = np.dot(link_origin_to_link_com, bbox_to_scaled_link_origin)
- bbox_center_in_base_com = np.dot(link_com_to_base_com, bbox_to_link_com)
- vertices_in_base_com = np.array(list(itertools.product((1, -1), repeat=3))) * (extent_in_bbox_frame / 2)
-
- # Add the points to our collection of points.
- points.extend(trimesh.transformations.transform_points(vertices_in_base_com, bbox_center_in_base_com))
- elif fallback_to_aabb:
- # If no BB annotation is available, get the AABB for this link.
- aabb_center, aabb_extent = get_center_extent(body_id, link=link)
- aabb_vertices_in_world = aabb_center + np.array(list(itertools.product((1, -1), repeat=3))) * (
- aabb_extent / 2
- )
- aabb_vertices_in_base_com = trimesh.transformations.transform_points(
- aabb_vertices_in_world, world_to_base_com
- )
- points.extend(aabb_vertices_in_base_com)
- else:
- raise ValueError(
- "Bounding box annotation missing for link: %s. Use fallback_to_aabb=True if you're okay with using "
- "AABB as fallback." % link_name
- )
-
- if xy_aligned:
- # If the user requested an XY-plane aligned bbox, convert everything to that frame.
- # The desired frame is same as the base_com frame with its X/Y rotations removed.
- translate = trimesh.transformations.translation_from_matrix(base_com_to_world)
-
- # To find the rotation that this transform does around the Z axis, we rotate the [1, 0, 0] vector by it
- # and then take the arctangent of its projection onto the XY plane.
- rotated_X_axis = base_com_to_world[:3, 0]
- rotation_around_Z_axis = np.arctan2(rotated_X_axis[1], rotated_X_axis[0])
- xy_aligned_base_com_to_world = trimesh.transformations.compose_matrix(
- translate=translate, angles=[0, 0, rotation_around_Z_axis]
- )
-
- # We want to move our points to this frame as well.
- world_to_xy_aligned_base_com = trimesh.transformations.inverse_matrix(xy_aligned_base_com_to_world)
- base_com_to_xy_aligned_base_com = np.dot(world_to_xy_aligned_base_com, base_com_to_world)
- points = trimesh.transformations.transform_points(points, base_com_to_xy_aligned_base_com)
-
- # Finally update our desired frame.
- desired_frame_to_world = xy_aligned_base_com_to_world
- else:
- # Default desired frame is base CoM frame.
- desired_frame_to_world = base_com_to_world
-
- # TODO: Implement logic to allow tight bounding boxes that don't necessarily have to match the base frame.
- # All points are now in the desired frame: either the base CoM or the xy-plane-aligned base CoM.
- # Now fit a bounding box to all the points by taking the minimum/maximum in the desired frame.
- aabb_min_in_desired_frame = np.amin(points, axis=0)
- aabb_max_in_desired_frame = np.amax(points, axis=0)
- bbox_center_in_desired_frame = (aabb_min_in_desired_frame + aabb_max_in_desired_frame) / 2
- bbox_extent_in_desired_frame = aabb_max_in_desired_frame - aabb_min_in_desired_frame
-
- # Transform the center to the world frame.
- bbox_center_in_world = trimesh.transformations.transform_points(
- [bbox_center_in_desired_frame], desired_frame_to_world
- )[0]
- bbox_orn_in_world = Rotation.from_matrix(desired_frame_to_world[:3, :3]).as_quat()
-
- return bbox_center_in_world, bbox_orn_in_world, bbox_extent_in_desired_frame, bbox_center_in_desired_frame
diff --git a/igibson/objects/cube.py b/igibson/objects/cube.py
index dc53b054d..54045de15 100644
--- a/igibson/objects/cube.py
+++ b/igibson/objects/cube.py
@@ -1,24 +1,22 @@
import pybullet as p
-from igibson.objects.object_base import SingleBodyObject
from igibson.objects.stateful_object import StatefulObject
-from igibson.utils.constants import SemanticClass
-class Cube(StatefulObject, SingleBodyObject):
+class Cube(StatefulObject):
"""
Cube shape primitive
"""
- def __init__(self, pos=[1, 2, 3], dim=[1, 2, 3], visual_only=False, mass=1000, color=[1, 1, 1, 1], **kwargs):
- super(Cube, self).__init__(**kwargs)
+ def __init__(self, pos=[1, 2, 3], dim=[1, 2, 3], visual_only=False, mass=1000, color=[1, 1, 1, 1]):
+ super(Cube, self).__init__()
self.basePos = pos
self.dimension = dim
self.visual_only = visual_only
self.mass = mass
self.color = color
- def _load(self, simulator):
+ def _load(self):
"""
Load the object into pybullet
"""
@@ -34,6 +32,4 @@ def _load(self, simulator):
p.resetBasePositionAndOrientation(body_id, self.basePos, baseOrientation)
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- return [body_id]
+ return body_id
diff --git a/igibson/objects/multi_object_wrappers.py b/igibson/objects/multi_object_wrappers.py
index 955e002a9..5943bf37b 100644
--- a/igibson/objects/multi_object_wrappers.py
+++ b/igibson/objects/multi_object_wrappers.py
@@ -2,12 +2,12 @@
import pybullet as p
-from igibson.object_states.factory import get_state_name
from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState
-from igibson.objects.object_base import BaseObject, NonRobotObject
+from igibson.objects.object_base import Object
+from igibson.objects.stateful_object import StatefulObject
-class ObjectGrouper(NonRobotObject):
+class ObjectGrouper(StatefulObject):
"""A multi-object wrapper that groups multiple objects and applies operations to all of them in parallel."""
class ProceduralMaterialAggregator(object):
@@ -56,7 +56,7 @@ def dump(self):
)
def load(self, data):
- if isinstance(data, ObjectGrouper.AbsoluteStateAggregator.GroupedStateDump):
+ if isinstance(data, (ObjectGrouper.AbsoluteStateAggregator.GroupedStateDump, list)):
# We're loading a many-to-many data dump.
assert len(data) == len(self.object_grouper.objects)
@@ -83,9 +83,11 @@ def set_value(self, other, new_value):
obj.states[self.state_type].set_value(other, new_value)
def __init__(self, objects_with_pose_offsets):
+ super(StatefulObject, self).__init__()
+
objects = [obj for obj, _ in objects_with_pose_offsets]
pose_offsets = [trans for _, trans in objects_with_pose_offsets]
- assert objects and all(isinstance(obj, BaseObject) for obj in objects)
+ assert objects and all(isinstance(obj, Object) for obj in objects)
self.objects = objects
# Pose offsets are the transformation of the object parts to the whole
@@ -138,15 +140,12 @@ def grouped_function(*args, **kwargs):
return attrs[0]
- def _load(self, simulator):
+ def _load(self):
body_ids = []
for obj in self.objects:
- body_ids += obj.load(simulator)
+ body_ids += obj._load()
return body_ids
- def get_body_id(self):
- raise ValueError("Cannot get_body_id on ObjectGrouper")
-
def get_position(self):
raise ValueError("Cannot get_position on ObjectGrouper")
@@ -170,24 +169,17 @@ def set_base_link_position_orientation(self, pos, orn):
new_pos, new_orn = p.multiplyTransforms(pos, orn, part_pos, part_orn)
obj.set_base_link_position_orientation(new_pos, new_orn)
- def dump_state(self):
- return {
- get_state_name(state_type): state_instance.dump()
- for state_type, state_instance in self.states.items()
- if issubclass(state_type, AbsoluteObjectState)
- }
-
- def load_state(self, dump):
- for state_type, state_instance in self.states.items():
- if issubclass(state_type, AbsoluteObjectState):
- state_instance.load(dump[get_state_name(state_type)])
+ def rotate_by(self, x=0, y=0, z=0):
+ raise ValueError("Cannot rotate_by on ObjectGrouper")
-class ObjectMultiplexer(NonRobotObject):
+class ObjectMultiplexer(StatefulObject):
"""A multi-object wrapper that acts as a proxy for the selected one between the set of objects it contains."""
def __init__(self, name, multiplexed_objects, current_index):
- assert multiplexed_objects and all(isinstance(obj, BaseObject) for obj in multiplexed_objects)
+ super(StatefulObject, self).__init__()
+
+ assert multiplexed_objects and all(isinstance(obj, Object) for obj in multiplexed_objects)
assert 0 <= current_index < len(multiplexed_objects)
for obj in multiplexed_objects:
@@ -197,6 +189,9 @@ def __init__(self, name, multiplexed_objects, current_index):
self.current_index = current_index
self.name = name
+ # This will help route obj.states to one of the multiplexed_objects
+ del self.states
+
def set_selection(self, idx):
assert 0 <= idx < len(self._multiplexed_objects)
self.current_index = idx
@@ -213,15 +208,12 @@ def __getattr__(self, item):
return getattr(self.current_selection(), item)
- def _load(self, simulator):
+ def _load(self):
body_ids = []
for obj in self._multiplexed_objects:
- body_ids += obj.load(simulator)
+ body_ids += obj._load()
return body_ids
- def get_body_id(self):
- return self.current_selection().get_body_id()
-
def get_position(self):
return self.current_selection().get_position()
@@ -243,6 +235,9 @@ def set_position_orientation(self, pos, orn):
def set_base_link_position_orientation(self, pos, orn):
return self.current_selection().set_base_link_position_orientation(pos, orn)
+ def rotate_by(self, x=0, y=0, z=0):
+ return self.current_selection().rotate_by(x, y, z)
+
def dump_state(self):
return {
"current_index": self.current_index,
diff --git a/igibson/objects/object_base.py b/igibson/objects/object_base.py
index c7d80af97..597b9c5b2 100644
--- a/igibson/objects/object_base.py
+++ b/igibson/objects/object_base.py
@@ -1,131 +1,110 @@
-from abc import ABCMeta, abstractmethod
-
-import numpy as np
import pybullet as p
-from future.utils import with_metaclass
-
-from igibson.utils.constants import SemanticClass
-class BaseObject(with_metaclass(ABCMeta, object)):
- """This is the interface that all iGibson objects must implement."""
+class Object(object):
+ """
+ Base Object class
+ """
- DEFAULT_RENDERING_PARAMS = {
- "use_pbr": True,
- "use_pbr_mapping": True,
- "shadow_caster": True,
- }
+ def __init__(self):
+ self.body_id = None
+ self.loaded = False
- def __init__(self, class_id=SemanticClass.USER_ADDED_OBJS, rendering_params=None):
- """
- Create an object instance with the minimum information of class ID and rendering parameters.
+ # initialize with empty states
+ self.states = dict()
+ # handle to instances in the renderer
+ self.renderer_instances = []
- @param class_id: What class ID the object should be assigned in semantic segmentation rendering mode.
- @param rendering_params: Any keyword arguments to be passed into simulator.load_object_into_renderer(...).
- """
- self.states = {}
- self._loaded = False
- self.class_id = class_id
- self._rendering_params = dict(self.DEFAULT_RENDERING_PARAMS)
- if rendering_params is not None:
- self._rendering_params.update(rendering_params)
+ def highlight(self):
+ for instance in self.renderer_instances:
+ instance.set_highlight(True)
- def load(self, simulator):
- """Load object into pybullet and return list of loaded body ids."""
- if self._loaded:
- raise ValueError("Cannot load a single object multiple times.")
- self._loaded = True
- return self._load(simulator)
+ def unhighlight(self):
+ for instance in self.renderer_instances:
+ instance.set_highlight(False)
- @abstractmethod
- def get_body_id(self):
+ def load(self):
"""
- Gets the body ID for the object.
-
- If the object somehow has multiple bodies, this will be the default body that the default manipulation functions
- will manipulate.
+ Load the object into pybullet.
+ _load() will be implemented in the subclasses
"""
- pass
-
- @abstractmethod
- def _load(self, simulator):
- pass
+ if self.loaded:
+ return self.body_id
+ self.body_id = self._load()
+ self.loaded = True
+ return self.body_id
def get_position(self):
- """Get object position in the format of Array[x, y, z]"""
- return self.get_position_orientation()[0]
+ """
+ Get object position
+
+ :return: position in xyz
+ """
+ pos, _ = p.getBasePositionAndOrientation(self.body_id)
+ return pos
def get_orientation(self):
- """Get object orientation as a quaternion in the format of Array[x, y, z, w]"""
- return self.get_position_orientation()[1]
+ """
+ Get object orientation
+
+ :return: quaternion in xyzw
+ """
+ _, orn = p.getBasePositionAndOrientation(self.body_id)
+ return orn
def get_position_orientation(self):
- """Get object position and orientation in the format of Tuple[Array[x, y, z], Array[x, y, z, w]]"""
- pos, orn = p.getBasePositionAndOrientation(self.get_body_id())
- return np.array(pos), np.array(orn)
+ """
+ Get object position and orientation
+
+ :return: position in xyz
+ :return: quaternion in xyzw
+ """
+ pos, orn = p.getBasePositionAndOrientation(self.body_id)
+ return pos, orn
def set_position(self, pos):
- """Set object position in the format of Array[x, y, z]"""
- old_orn = self.get_orientation()
- self.set_position_orientation(pos, old_orn)
+ """
+ Set object position
+
+ :param pos: position in xyz
+ """
+ _, old_orn = p.getBasePositionAndOrientation(self.body_id)
+ p.resetBasePositionAndOrientation(self.body_id, pos, old_orn)
def set_orientation(self, orn):
- """Set object orientation as a quaternion in the format of Array[x, y, z, w]"""
- old_pos = self.get_position()
- self.set_position_orientation(old_pos, orn)
+ """
+ Set object orientation
+
+ :param orn: quaternion in xyzw
+ """
+ old_pos, _ = p.getBasePositionAndOrientation(self.body_id)
+ p.resetBasePositionAndOrientation(self.body_id, old_pos, orn)
def set_position_orientation(self, pos, orn):
- """Set object position and orientation in the format of Tuple[Array[x, y, z], Array[x, y, z, w]]"""
- p.resetBasePositionAndOrientation(self.get_body_id(), pos, orn)
+ """
+ Set object position and orientation
+
+ :param pos: position in xyz
+ :param orn: quaternion in xyzw
+ """
+ p.resetBasePositionAndOrientation(self.body_id, pos, orn)
def set_base_link_position_orientation(self, pos, orn):
- """Set object base link position and orientation in the format of Tuple[Array[x, y, z], Array[x, y, z, w]]"""
- dynamics_info = p.getDynamicsInfo(self.get_body_id(), -1)
+ dynamics_info = p.getDynamicsInfo(self.body_id, -1)
inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)
self.set_position_orientation(pos, orn)
+ def rotate_by(self, x=0, y=0, z=0):
+ """
+ Rotates the object by the given euler angles
+ """
+ e_x, e_y, e_z = p.getEulerFromQuaternion(self.get_orientation())
+ self.set_orientation(p.getQuaternionFromEuler([e_x + x, e_y + y, e_z + z]))
+
def dump_state(self):
- """Dump the state of the object other than what's not included in pybullet state."""
+ """Dumps the state of the object other than what's not included in pybullet state."""
return None
def load_state(self, dump):
- """Load the state of the object other than what's not included in pybullet state."""
+ """Loads the state of the object other than what's not included in pybullet state."""
pass
-
-
-class NonRobotObject(BaseObject):
- # This class implements the object interface for non-robot objects.
- # Also allows us to identify non-robot objects until all simulator etc. call for importing etc. are unified.
-
- # TODO: This renderer_instances logic doesn't actually need to be specific to non-robot objects. Generalize this.
- def __init__(self, **kwargs):
- super(NonRobotObject, self).__init__(**kwargs)
-
- self.renderer_instances = []
-
- def highlight(self):
- for instance in self.renderer_instances:
- instance.set_highlight(True)
-
- def unhighlight(self):
- for instance in self.renderer_instances:
- instance.set_highlight(False)
-
-
-class SingleBodyObject(NonRobotObject):
- """Provides convenience get_body_id() function for single-body objects."""
-
- # TODO: Merge this into BaseObject once URDFObject also becomes single-body.
-
- def __init__(self, **kwargs):
- super(SingleBodyObject, self).__init__(**kwargs)
- self._body_id = None
-
- def load(self, simulator):
- body_ids = super(NonRobotObject, self).load(simulator)
- self._body_id = body_ids[0]
- return body_ids
-
- def get_body_id(self):
- return self._body_id
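
For orientation, a minimal sketch (not itself part of the patch) of how the reverted single-body Object interface is typically exercised. The BoxObject subclass and its pybullet shape parameters are hypothetical; only load(), set_position() and rotate_by() come from the class above, and an active pybullet connection is assumed.

    import math
    import pybullet as p
    from igibson.objects.object_base import Object

    class BoxObject(Object):  # hypothetical subclass, for illustration only
        def _load(self):
            # _load() creates the pybullet body and returns its single body id
            box = p.createCollisionShape(p.GEOM_BOX, halfExtents=[0.1, 0.1, 0.1])
            return p.createMultiBody(baseMass=1.0, baseCollisionShapeIndex=box)

    p.connect(p.DIRECT)
    obj = BoxObject()
    obj.load()                     # caches body_id and marks the object as loaded
    obj.set_position([0, 0, 0.5])  # position in xyz
    obj.rotate_by(z=math.pi / 2)   # incremental rotation about the z axis
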
diff --git a/igibson/objects/particles.py b/igibson/objects/particles.py
index dc9ed087f..cf9f217be 100644
--- a/igibson/objects/particles.py
+++ b/igibson/objects/particles.py
@@ -7,24 +7,18 @@
import igibson
from igibson.external.pybullet_tools import utils
from igibson.external.pybullet_tools.utils import get_aabb_extent, get_link_name, link_from_name
-from igibson.objects.object_base import SingleBodyObject
+from igibson.objects.object_base import Object
from igibson.utils import sampling_utils
from igibson.utils.constants import PyBulletSleepState, SemanticClass
_STASH_POSITION = [0, 0, -100]
-class Particle(SingleBodyObject):
+class Particle(Object):
"""
 A particle object, used to simulate water streams and dust/stains
"""
- DEFAULT_RENDERING_PARAMS = {
- "use_pbr": False,
- "use_pbr_mapping": False,
- "shadow_caster": True,
- }
-
def __init__(
self,
size,
@@ -35,7 +29,6 @@ def __init__(
base_shape="sphere",
mesh_filename=None,
mesh_bounding_box=None,
- **kwargs
):
"""
Create a particle.
@@ -48,9 +41,8 @@ def __init__(
:param base_shape: One of "cube", "sphere", "mesh". If mesh, mesh_filename also required.
:param mesh_filename: Filename of obj file to load mesh from, if base_shape is "mesh".
:param mesh_bounding_box: bounding box of the mesh when scale=1. Needed for scale computation.
- :param rendering_params: rendering parameters to pass onto object base & renderer.
"""
- super(Particle, self).__init__(**kwargs)
+ super(Particle, self).__init__()
self.base_pos = pos
self.size = size
self.visual_only = visual_only
@@ -65,7 +57,7 @@ def __init__(
self.mesh_filename = mesh_filename
self.mesh_scale = np.array(size) / np.array(mesh_bounding_box)
- def _load(self, simulator):
+ def _load(self):
"""
Load the object into pybullet
"""
@@ -97,28 +89,34 @@ def _load(self, simulator):
p.resetBasePositionAndOrientation(body_id, np.array(self.base_pos), base_orientation)
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
self.force_sleep(body_id)
- return [body_id]
+ return body_id
def force_sleep(self, body_id=None):
if body_id is None:
- body_id = self.get_body_id()
+ body_id = self.body_id
activationState = p.ACTIVATION_STATE_SLEEP + p.ACTIVATION_STATE_DISABLE_WAKEUP
p.changeDynamics(body_id, -1, activationState=activationState)
def force_wakeup(self):
activationState = p.ACTIVATION_STATE_WAKE_UP
- p.changeDynamics(self.get_body_id(), -1, activationState=activationState)
+ p.changeDynamics(self.body_id, -1, activationState=activationState)
class ParticleSystem(object):
- DEFAULT_RENDERING_PARAMS = {} # Accept the Particle defaults but expose this interface for children
-
- def __init__(self, num, size, color=(1, 1, 1, 1), rendering_params=None, **kwargs):
+ def __init__(
+ self,
+ num,
+ size,
+ color=(1, 1, 1, 1),
+ class_id=SemanticClass.USER_ADDED_OBJS,
+ use_pbr=False,
+ use_pbr_mapping=False,
+ shadow_caster=True,
+ **kwargs
+ ):
size = np.array(size)
if size.ndim == 2:
assert size.shape[0] == num
@@ -133,19 +131,19 @@ def __init__(self, num, size, color=(1, 1, 1, 1), rendering_params=None, **kwarg
self._particles_activated_at_any_time = set()
self._simulator = None
-
- rendering_params_for_particle = dict(self.DEFAULT_RENDERING_PARAMS)
- if rendering_params is not None:
- rendering_params_for_particle.update(rendering_params)
+ self._import_params = {
+ "class_id": class_id,
+ "use_pbr": use_pbr,
+ "use_pbr_mapping": use_pbr_mapping,
+ "shadow_caster": shadow_caster,
+ }
for i in range(num):
# If different sizes / colors provided for each instance, pick the correct one for this instance.
this_size = size if size.ndim == 1 else size[i]
this_color = color if color.ndim == 1 else color[i]
- particle = Particle(
- this_size, _STASH_POSITION, color=this_color, rendering_params=rendering_params_for_particle, **kwargs
- )
+ particle = Particle(this_size, _STASH_POSITION, color=this_color, **kwargs)
self._all_particles.append(particle)
self._stashed_particles.append(particle)
@@ -209,10 +207,10 @@ def stash_particle(self, particle):
particle.force_sleep()
def _load_particle(self, particle):
- body_ids = particle.load(self._simulator)
+ body_id = self._simulator.import_object(particle, **self._import_params)
# Put loaded particles at the stash position initially.
particle.set_position(_STASH_POSITION)
- return body_ids
+ return body_id
def unstash_particle(self, position, orientation, particle=None):
# If the user wants a particular particle, give it to them. Otherwise, unstash one.
@@ -222,7 +220,7 @@ def unstash_particle(self, position, orientation, particle=None):
particle = self._stashed_particles.popleft()
# Lazy loading of the particle now if not already loaded
- if not particle.get_body_id():
+ if not particle.loaded:
self._load_particle(particle)
particle.set_position_orientation(position, orientation)
@@ -381,7 +379,6 @@ class WaterStream(ParticleSystem):
]
)
_COLOR_OPTIONS = np.array([(0.61, 0.82, 0.86, 1), (0.5, 0.77, 0.87, 1)])
- DEFAULT_RENDERING_PARAMS = {"use_pbr": True} # PBR needs to be on for the shiny water particles.
def __init__(self, water_source_pos, num, initial_dump=None, **kwargs):
if initial_dump is not None:
@@ -400,6 +397,7 @@ def __init__(self, water_source_pos, num, initial_dump=None, **kwargs):
color=self.colors,
visual_only=False,
mass=0.00005, # each drop is around 0.05 grams
+ use_pbr=True, # PBR needs to be on for the shiny water particles.
**kwargs
)
@@ -433,16 +431,16 @@ def initialize(self, simulator):
def _load_particle(self, particle):
# First load the particle normally.
- body_ids = super(WaterStream, self)._load_particle(particle)
+ body_id = super(WaterStream, self)._load_particle(particle)
# Set renderer instance settings on the particles.
instances = self._simulator.renderer.get_instances()
for instance in instances:
- if instance.pybullet_uuid in body_ids:
+ if instance.pybullet_uuid == body_id:
instance.roughness = 0
instance.metalness = 1
- return body_ids
+ return body_id
def set_running(self, on):
self.on = on
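
As a usage note, a hedged sketch of how the reworked particle import parameters flow. It assumes an already-initialized Simulator instance named s; the WaterStream constructor, initialize(), and the class_id keyword come from the code above.

    from igibson.objects.particles import WaterStream
    from igibson.utils.constants import SemanticClass

    # Rendering flags now travel with the ParticleSystem and are forwarded to
    # simulator.import_object(...) whenever a stashed particle is lazily loaded.
    water = WaterStream(water_source_pos=[0.5, 0.5, 1.0], num=20,
                        class_id=SemanticClass.USER_ADDED_OBJS)
    water.initialize(s)  # s: existing Simulator (assumed), kept for lazy particle loading
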
diff --git a/igibson/objects/pedestrian.py b/igibson/objects/pedestrian.py
index 2d4ff60de..21846c816 100644
--- a/igibson/objects/pedestrian.py
+++ b/igibson/objects/pedestrian.py
@@ -3,18 +3,16 @@
import pybullet as p
import igibson
-from igibson.objects.object_base import SingleBodyObject
from igibson.objects.stateful_object import StatefulObject
-from igibson.utils.constants import SemanticClass
-class Pedestrian(StatefulObject, SingleBodyObject):
+class Pedestrian(StatefulObject):
"""
 Pedestrian object
"""
- def __init__(self, style="standing", pos=[0, 0, 0], **kwargs):
- super(Pedestrian, self).__init__(**kwargs)
+ def __init__(self, style="standing", pos=[0, 0, 0]):
+ super(Pedestrian, self).__init__()
self.collision_filename = os.path.join(
igibson.assets_path, "models", "person_meshes", "person_{}".format(style), "meshes", "person_vhacd.obj"
)
@@ -24,7 +22,7 @@ def __init__(self, style="standing", pos=[0, 0, 0], **kwargs):
self.cid = None
self.pos = pos
- def _load(self, simulator):
+ def _load(self):
"""
Load the object into pybullet
"""
@@ -46,9 +44,7 @@ def _load(self, simulator):
parentFrameOrientation=[-0.5, -0.5, -0.5, 0.5],
) # facing x axis
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- return [body_id]
+ return body_id
def reset_position_orientation(self, pos, orn):
"""
diff --git a/igibson/objects/shapenet_object.py b/igibson/objects/shapenet_object.py
index 98c9afdee..a20dd3437 100644
--- a/igibson/objects/shapenet_object.py
+++ b/igibson/objects/shapenet_object.py
@@ -1,19 +1,17 @@
import numpy as np
import pybullet as p
-from igibson.objects.object_base import SingleBodyObject
from igibson.objects.stateful_object import StatefulObject
-from igibson.utils.constants import SemanticClass
-class ShapeNetObject(StatefulObject, SingleBodyObject):
+class ShapeNetObject(StatefulObject):
"""
ShapeNet object
Reference: https://www.shapenet.org/
"""
- def __init__(self, path, scale=1.0, position=[0, 0, 0], orientation=[0, 0, 0], **kwargs):
- super(ShapeNetObject, self).__init__(**kwargs)
+ def __init__(self, path, scale=1.0, position=[0, 0, 0], orientation=[0, 0, 0]):
+ super(ShapeNetObject, self).__init__()
self.filename = path
self.scale = scale
self.position = position
@@ -35,7 +33,7 @@ def __init__(self, path, scale=1.0, position=[0, 0, 0], orientation=[0, 0, 0], *
"orientation_quat": pose[1],
}
- def _load(self, simulator):
+ def _load(self):
"""
Load the object into pybullet
"""
@@ -47,6 +45,4 @@ def _load(self, simulator):
baseCollisionShapeIndex=collision_id,
baseVisualShapeIndex=-1,
)
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- return [body_id]
+ return body_id
diff --git a/igibson/objects/soft_object.py b/igibson/objects/soft_object.py
index e120f9793..c7f4da1a8 100644
--- a/igibson/objects/soft_object.py
+++ b/igibson/objects/soft_object.py
@@ -1,11 +1,9 @@
import pybullet as p
-from igibson.objects.object_base import SingleBodyObject
from igibson.objects.stateful_object import StatefulObject
-from igibson.utils.constants import SemanticClass
-class SoftObject(StatefulObject, SingleBodyObject):
+class SoftObject(StatefulObject):
"""
Soft object (WIP)
"""
@@ -30,9 +28,8 @@ def __init__(
frictionCoeff=0,
useFaceContact=0,
useSelfCollision=0,
- **kwargs
):
- super(SoftObject, self).__init__(**kwargs)
+ super(SoftObject, self).__init__()
self.filename = filename
self.scale = scale
self.basePosition = basePosition
@@ -52,7 +49,7 @@ def __init__(
self.useFaceContact = useFaceContact
self.useSelfCollision = useSelfCollision
- def _load(self, simulator):
+ def _load(self):
"""
Load the object into pybullet
"""
@@ -80,14 +77,10 @@ def _load(self, simulator):
# Set signed distance function voxel size (integrate to Simulator class?)
p.setPhysicsEngineParameter(sparseSdfVoxelSize=0.1)
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params, softbody=True)
-
- return [body_id]
+ return body_id
def add_anchor(self, nodeIndex=-1, bodyUniqueId=-1, linkIndex=-1, bodyFramePosition=[0, 0, 0], physicsClientId=0):
"""
Create soft body anchor
"""
- p.createSoftBodyAnchor(
- self.get_body_id(), nodeIndex, bodyUniqueId, linkIndex, bodyFramePosition, physicsClientId
- )
+ p.createSoftBodyAnchor(self.body_id, nodeIndex, bodyUniqueId, linkIndex, bodyFramePosition, physicsClientId)
diff --git a/igibson/objects/stateful_object.py b/igibson/objects/stateful_object.py
index 2ec182ad5..1a9ad1641 100644
--- a/igibson/objects/stateful_object.py
+++ b/igibson/objects/stateful_object.py
@@ -1,22 +1,18 @@
from igibson.object_states.factory import get_state_name, prepare_object_states
from igibson.object_states.object_state_base import AbsoluteObjectState
from igibson.object_states.utils import clear_cached_states
-from igibson.objects.object_base import BaseObject
+from igibson.objects.object_base import Object
-class StatefulObject(BaseObject):
- """Objects that support object states."""
+class StatefulObject(Object):
+ """
+ Base class for objects that support object states.
+ """
- def __init__(self, abilities=None, **kwargs):
- super(StatefulObject, self).__init__(**kwargs)
- prepare_object_states(self, abilities=abilities)
-
- def load(self, simulator):
- body_ids = super(StatefulObject, self).load(simulator)
- for state in self.states.values():
- state.initialize(simulator)
-
- return body_ids
+ def __init__(self):
+ super(StatefulObject, self).__init__()
+ self.abilities = {}
+ prepare_object_states(self, online=True)
def dump_state(self):
return {
@@ -30,6 +26,14 @@ def load_state(self, dump):
if issubclass(state_type, AbsoluteObjectState):
state_instance.load(dump[get_state_name(state_type)])
+ def set_position(self, pos):
+ super(StatefulObject, self).set_position(pos)
+ clear_cached_states(self)
+
+ def set_orientation(self, orn):
+ super(StatefulObject, self).set_orientation(orn)
+ clear_cached_states(self)
+
def set_position_orientation(self, pos, orn):
super(StatefulObject, self).set_position_orientation(pos, orn)
clear_cached_states(self)
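
The practical effect of the two added setters, sketched under the assumption that obj is an already-imported StatefulObject:

    # Any of the three pose setters now calls clear_cached_states(obj) internally,
    # so cached object-state values are recomputed after the object is moved.
    obj.set_position([1.0, 0.0, 0.5])
    obj.set_orientation([0.0, 0.0, 0.0, 1.0])
    obj.set_position_orientation([1.0, 0.0, 0.5], [0.0, 0.0, 0.0, 1.0])
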
diff --git a/igibson/objects/visual_marker.py b/igibson/objects/visual_marker.py
index 21a9bc9d4..fac3e72e9 100644
--- a/igibson/objects/visual_marker.py
+++ b/igibson/objects/visual_marker.py
@@ -1,19 +1,13 @@
import pybullet as p
-from igibson.objects.object_base import SingleBodyObject
+from igibson.objects.object_base import Object
-class VisualMarker(SingleBodyObject):
+class VisualMarker(Object):
"""
Visual shape created with shape primitives
"""
- DEFAULT_RENDERING_PARAMS = {
- "use_pbr": False,
- "use_pbr_mapping": False,
- "shadow_caster": False,
- }
-
def __init__(
self,
visual_shape=p.GEOM_SPHERE,
@@ -24,7 +18,6 @@ def __init__(
initial_offset=[0, 0, 0],
filename=None,
scale=[1.0] * 3,
- **kwargs
):
"""
create a visual shape to show in pybullet and MeshRenderer
@@ -38,7 +31,7 @@ def __init__(
:param filename: mesh file name for p.GEOM_MESH
:param scale: scale for p.GEOM_MESH
"""
- super(VisualMarker, self).__init__(**kwargs)
+ super(VisualMarker, self).__init__()
self.visual_shape = visual_shape
self.rgba_color = rgba_color
self.radius = radius
@@ -48,7 +41,7 @@ def __init__(
self.filename = filename
self.scale = scale
- def _load(self, simulator):
+ def _load(self):
"""
Load the object into pybullet
"""
@@ -80,9 +73,7 @@ def _load(self, simulator):
baseVisualShapeIndex=shape, baseCollisionShapeIndex=-1, flags=p.URDF_ENABLE_SLEEPING
)
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- return [body_id]
+ return body_id
def set_color(self, color):
"""
@@ -90,18 +81,18 @@ def set_color(self, color):
:param color: normalized rgba color
"""
- p.changeVisualShape(self.get_body_id(), -1, rgbaColor=color)
+ p.changeVisualShape(self.body_id, -1, rgbaColor=color)
def force_sleep(self, body_id=None):
if body_id is None:
- body_id = self.get_body_id()
+ body_id = self.body_id
activationState = p.ACTIVATION_STATE_SLEEP + p.ACTIVATION_STATE_DISABLE_WAKEUP
p.changeDynamics(body_id, -1, activationState=activationState)
def force_wakeup(self):
activationState = p.ACTIVATION_STATE_WAKE_UP
- p.changeDynamics(self.get_body_id(), -1, activationState=activationState)
+ p.changeDynamics(self.body_id, -1, activationState=activationState)
def set_position(self, pos):
self.force_wakeup()
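
A hedged sketch of creating a marker with the trimmed-down constructor; the rgba_color and radius keyword names are inferred from the attributes assigned above, and s is assumed to be an existing Simulator.

    import pybullet as p
    from igibson.objects.visual_marker import VisualMarker

    # Rendering flags (use_pbr, shadow_caster, ...) are no longer constructor kwargs;
    # they are handled when the simulator imports the object.
    marker = VisualMarker(visual_shape=p.GEOM_SPHERE, rgba_color=[1, 0, 0, 0.5], radius=0.05)
    s.import_object(marker)           # s: existing Simulator (assumed)
    marker.set_color([0, 1, 0, 0.5])  # update the primitive's color in place
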
diff --git a/igibson/objects/ycb_object.py b/igibson/objects/ycb_object.py
index d331c5a97..14a184abe 100644
--- a/igibson/objects/ycb_object.py
+++ b/igibson/objects/ycb_object.py
@@ -3,24 +3,22 @@
import pybullet as p
import igibson
-from igibson.objects.object_base import SingleBodyObject
from igibson.objects.stateful_object import StatefulObject
-from igibson.utils.constants import SemanticClass
-class YCBObject(StatefulObject, SingleBodyObject):
+class YCBObject(StatefulObject):
"""
YCB Object from assets/models/ycb
Reference: https://www.ycbbenchmarks.com/
"""
- def __init__(self, name, scale=1, **kwargs):
- super(YCBObject, self).__init__(**kwargs)
+ def __init__(self, name, scale=1):
+ super(YCBObject, self).__init__()
self.visual_filename = os.path.join(igibson.assets_path, "models", "ycb", name, "textured_simple.obj")
self.collision_filename = os.path.join(igibson.assets_path, "models", "ycb", name, "textured_simple_vhacd.obj")
self.scale = scale
- def _load(self, simulator):
+ def _load(self):
collision_id = p.createCollisionShape(p.GEOM_MESH, fileName=self.collision_filename, meshScale=self.scale)
visual_id = p.createVisualShape(p.GEOM_MESH, fileName=self.visual_filename, meshScale=self.scale)
@@ -30,14 +28,8 @@ def _load(self, simulator):
basePosition=[0.2, 0.2, 1.5],
baseMass=0.1,
)
+ self.body_id = body_id
+ return body_id
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- return [body_id]
-
- def reset(self):
- return
-
- def force_wakeup(self):
- activationState = p.ACTIVATION_STATE_WAKE_UP
- p.changeDynamics(self.get_body_id(), -1, activationState=activationState)
+ def get_body_id(self):
+ return self.body_id
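
A hedged sketch of the slimmed-down YCBObject; the model name is illustrative and s is assumed to be an existing Simulator instance.

    from igibson.objects.ycb_object import YCBObject

    # YCBObject no longer accepts rendering kwargs; _load() now caches and returns a single body id.
    obj = YCBObject("003_cracker_box", scale=1)  # model name is illustrative
    s.import_object(obj)                         # s: existing Simulator (assumed)
    print(obj.get_body_id())                     # the single pybullet body id
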
diff --git a/igibson/physics/settings.py b/igibson/physics/settings.py
deleted file mode 100644
index 53ed518bc..000000000
--- a/igibson/physics/settings.py
+++ /dev/null
@@ -1,8 +0,0 @@
-class PhysicsSettings(object):
- def __init__(
- self,
- solver_iterations=100,
- enhanced_determinism=False,
- ):
- self.solver_iterations = solver_iterations
- enhanced_determinism = enhanced_determinism
diff --git a/igibson/render/CMakeLists.txt b/igibson/render/CMakeLists.txt
index 20d0d0db3..65f5ba714 100644
--- a/igibson/render/CMakeLists.txt
+++ b/igibson/render/CMakeLists.txt
@@ -2,221 +2,147 @@ cmake_minimum_required(VERSION 2.8.12)
project(CppMeshRenderer)
set(USE_GLAD TRUE)
-set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
-set(OpenGL_GL_PREFERENCE GLVND)
-
-if(MAC_PLATFORM)
- set(USE_GLFW TRUE)
- set(USE_VR FALSE)
- set(USE_EGL FALSE)
-elseif(WIN32)
- set(USE_GLFW FALSE)
- # iGibson on Windows is always in VR mode
- set(USE_VR TRUE)
- set(USE_EGL FALSE)
-elseif(UNIX)
- set(USE_GLFW FALSE)
- set(USE_EGL TRUE)
-else()
- message( FATAL_ERROR "Unsupported platform: iGibson only supports macOS, linux, and windows" )
-endif()
-# If a user is using a new enough cmake version (Ubuntu 18.04 or greater)
-# Automatically detect and enable/disable CUDA, otherwise assume true
-if (UNIX AND (CMAKE_VERSION VERSION_GREATER 3.17 OR CMAKE_VERSION VERSION_EQUAL 3.17))
- find_package(CUDAToolkit)
- if (NOT CUDAToolkit_FOUND)
- message(WARNING [=[
- nvcc not detected in path. iGibson will be built without CUDA support used
- for rendering to tensors. If desired, please install cudatoolkit with your
- package manager or cudatoolkit-dev with conda.
- ]=])
+if (MAC_PLATFORM)
+ set(USE_CUDA FALSE)
+ set(USE_GLFW TRUE)
+ set(USE_VR FALSE)
+elseif (WIN32)
set(USE_CUDA FALSE)
- else ()
+ set(USE_GLFW TRUE)
+ # iGibson on Windows is always in VR mode
+ set(USE_VR TRUE)
+else ()
set(USE_CUDA TRUE)
- endif ()
-elseif(UNIX)
- # Assume CUDA true otherwise
- set(USE_CUDA TRUE)
-else()
- # Assume CUDA false otherwise (macOS, Windows)
- set(USE_CUDA FALSE)
-endif()
+ set(USE_GLFW FALSE)
+ set(USE_VR FALSE)
+endif ()
include_directories(glad)
-if(NOT USE_GLAD)
- find_package(OpenGL)
-else()
- add_definitions(-DUSE_GLAD)
-endif()
+if (NOT USE_GLAD)
+ find_package(OpenGL)
+else ()
+ add_definitions(-DUSE_GLAD)
+endif ()
-if(USE_VR)
- find_package(OpenGL)
- # OpenGL is needed for vr-gl interoperation
+if (USE_VR)
+ find_package(OpenGL)
+ # OpenGL is needed for vr-gl interoperation
endif()
-if(USE_CUDA)
- add_definitions(-DUSE_CUDA)
- find_package(OpenGL)
- # OpenGL is still needed for cuda-gl interoperation
-endif()
+if (USE_CUDA)
+ add_definitions(-DUSE_CUDA)
+ find_package(OpenGL)
+ #OpenGL is still needed for cuda-gl interoperation
+endif ()
add_subdirectory(pybind11)
add_subdirectory(cryptopp)
include_directories(cryptopp)
+
# Add GLM include directory
include_directories("glm")
-if(USE_GLFW OR USE_VR)
- set(GLFW_DIR glfw)
- set(GLFW_BUILD_EXAMPLES
- OFF
- CACHE INTERNAL "Build the GLFW example programs")
- set(GLFW_BUILD_TESTS
- OFF
- CACHE INTERNAL "Build the GLFW test programs")
- set(GLFW_BUILD_DOCS
- OFF
- CACHE INTERNAL "Build the GLFW documentation")
- set(GLFW_INSTALL
- OFF
- CACHE INTERNAL "Generate installation target")
- add_subdirectory("${GLFW_DIR}")
- include_directories("${GLFW_DIR}/include")
-endif()
-
-if(USE_VR)
- # Find OpenVR
- set(OPENVR_DIR openvr)
- set(OPENVR_INCLUDE_DIR "${OPENVR_DIR}/headers")
- include_directories("${OPENVR_INCLUDE_DIR}")
+if (USE_GLFW)
+ set(GLFW_DIR glfw)
+ set(GLFW_BUILD_EXAMPLES OFF CACHE INTERNAL "Build the GLFW example programs")
+ set(GLFW_BUILD_TESTS OFF CACHE INTERNAL "Build the GLFW test programs")
+ set(GLFW_BUILD_DOCS OFF CACHE INTERNAL "Build the GLFW documentation")
+ set(GLFW_INSTALL OFF CACHE INTERNAL "Generate installation target")
+ add_subdirectory("${GLFW_DIR}")
+ include_directories("${GLFW_DIR}/include")
+endif ()
+
+if (USE_VR)
+ # Find OpenVR
+ set(WINDOWS_PATH_SUFFIXES win64 Win64 x64)
+ set(OPENVR_DIR openvr)
+ find_library(OPENVR_LIBRARIES
+ NAMES
+ openvr_api
+ PATHS
+ "${OPENVR_DIR}/bin"
+ "${OPENVR_DIR}/lib"
+ PATH_SUFFIXES
+ ${WINDOWS_PATH_SUFFIXES}
+ NO_DEFAULT_PATH
+ NO_CMAKE_FIND_ROOT_PATH
+ )
+ set(OPENVR_INCLUDE_DIR "${OPENVR_DIR}/headers")
+ include_directories("${OPENVR_INCLUDE_DIR}")
- if(WIN32)
- set(PATH_SUFFIXES win64 Win64 x64)
# Find SRAnipal
set(SRANI_DIR sranipal)
- find_library(
- SRANI_LIBRARIES
- NAMES SRAnipal
- PATHS "${SRANI_DIR}/lib"
- NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(SRANI_LIBRARIES
+ NAMES
+ SRAnipal
+ PATHS
+ "${SRANI_DIR}/lib"
+ NO_DEFAULT_PATH
+ NO_CMAKE_FIND_ROOT_PATH
+ )
include_directories("${SRANI_DIR}/include")
- else()
- set(PATH_SUFFIXES linux64 Linux64 x64)
- endif()
-
- find_library(
- OPENVR_LIBRARIES
- NAMES openvr_api
- PATHS "${OPENVR_DIR}/bin" "${OPENVR_DIR}/lib"
- PATH_SUFFIXES ${PATH_SUFFIXES}
- NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
-
- add_library(
- VRRendererContext MODULE glad/gl.cpp cpp/vr_mesh_renderer.cpp
- cpp/glfw_mesh_renderer.cpp cpp/mesh_renderer.cpp)
- if(WIN32)
- target_link_libraries(
- VRRendererContext
- PRIVATE pybind11::module
- ${CMAKE_DL_LIBS}
- glfw
- ${GLFW_LIBRARIES}
- ${OPENGL_LIBRARIES}
- ${OPENVR_LIBRARIES}
- ${SRANI_LIBRARIES}
- cryptopp-static)
- else()
- target_link_libraries(
- VRRendererContext
- PRIVATE pybind11::module
- ${CMAKE_DL_LIBS}
- glfw
- ${GLFW_LIBRARIES}
- ${OPENGL_LIBRARIES}
- ${OPENVR_LIBRARIES}
- ${CUDA_LIBRARIES}
- cryptopp-static)
- endif()
- set_target_properties(
- VRRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
- SUFFIX "${PYTHON_MODULE_EXTENSION}")
endif()
-if(USE_EGL)
- if(USE_CUDA)
- find_package(CUDA REQUIRED)
- set(CUDA_LIBRARIES PUBLIC ${CUDA_LIBRARIES})
- cuda_add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp
- cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp)
- else()
- add_library(
- EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp
- cpp/egl_mesh_renderer.cpp)
- endif()
-
- if(USE_GLAD)
- target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl
- pthread cryptopp-static)
- else()
- target_link_libraries(
- EGLRendererContext PRIVATE pybind11::module dl pthread EGL
- ${OPENGL_LIBRARIES} cryptopp-static)
- endif()
-
- set_target_properties(
- EGLRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
- SUFFIX "${PYTHON_MODULE_EXTENSION}")
+add_library(tinyobjloader MODULE cpp/tinyobjloader/tiny_obj_loader.cc cpp/tinyobjloader/tiny_obj_loader_decrypt.cc cpp/tinyobjloader/bindings.cc)
+if (USE_VR)
+ add_library(VRRendererContext MODULE glad/gl.cpp cpp/vr_mesh_renderer.cpp cpp/glfw_mesh_renderer.cpp cpp/mesh_renderer.cpp)
+else()
+ if (USE_CUDA)
+ find_package(CUDA REQUIRED)
+ set(CUDA_LIBRARIES PUBLIC ${CUDA_LIBRARIES})
+ cuda_add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp)
+ else ()
+ add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp)
+ endif ()
+ if (USE_GLFW)
+ add_library(GLFWRendererContext MODULE glad/gl.cpp cpp/glfw_mesh_renderer.cpp cpp/mesh_renderer.cpp)
+ endif ()
endif()
-if(USE_GLFW)
- add_library(GLFWRendererContext MODULE glad/gl.cpp cpp/glfw_mesh_renderer.cpp
- cpp/mesh_renderer.cpp)
- if(WIN32)
- if(USE_GLAD)
- target_link_libraries(
- GLFWRendererContext PRIVATE pybind11::module glfw ${GLFW_LIBRARIES}
- cryptopp-static)
- else()
- target_link_libraries(
- GLFWRendererContext PRIVATE pybind11::module glfw ${GLFW_LIBRARIES}
- ${OPENGL_LIBRARIES} cryptopp-static)
+if (USE_VR)
+ target_link_libraries(VRRendererContext PRIVATE pybind11::module ${CMAKE_DL_LIBS} glfw ${GLFW_LIBRARIES} ${OPENGL_LIBRARIES} ${OPENVR_LIBRARIES} ${SRANI_LIBRARIES} cryptopp-static)
+else ()
+ if (USE_GLAD)
+ target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl pthread cryptopp-static)
+ if (USE_GLFW)
+ target_link_libraries(GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES} cryptopp-static)
+ endif ()
+ else ()
+ target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl pthread EGL ${OPENGL_LIBRARIES} cryptopp-static)
+ if (USE_GLFW)
+ target_link_libraries(GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES} ${OPENGL_LIBRARIES} cryptopp-static)
+ endif ()
endif()
- else()
- if(USE_GLAD)
- target_link_libraries(
- GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES}
- cryptopp-static)
- else()
- target_link_libraries(
- GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES}
- ${OPENGL_LIBRARIES} cryptopp-static)
- endif()
- endif()
- set_target_properties(
- GLFWRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
- SUFFIX "${PYTHON_MODULE_EXTENSION}")
-endif()
+endif ()
-add_library(
- tinyobjloader MODULE
- cpp/tinyobjloader/tiny_obj_loader.cc
- cpp/tinyobjloader/tiny_obj_loader_decrypt.cc cpp/tinyobjloader/bindings.cc)
target_link_libraries(tinyobjloader PRIVATE pybind11::module cryptopp-static)
-set_target_properties(
- tinyobjloader PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
- SUFFIX "${PYTHON_MODULE_EXTENSION}")
-
-if(UNIX)
- add_executable(query_devices glad/egl.cpp glad/gl.cpp cpp/query_devices.cpp)
- add_executable(test_device glad/egl.cpp glad/gl.cpp cpp/test_device.cpp)
- if(USE_GLAD)
- target_link_libraries(query_devices dl pthread)
- target_link_libraries(test_device dl pthread)
- else()
- target_link_libraries(query_devices dl pthread EGL ${OPENGL_LIBRARIES})
- target_link_libraries(test_device dl pthread EGL ${OPENGL_LIBRARIES})
- endif()
+
+if (USE_VR)
+ set_target_properties(VRRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
+ SUFFIX "${PYTHON_MODULE_EXTENSION}")
+else()
+ set_target_properties(EGLRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
+ SUFFIX "${PYTHON_MODULE_EXTENSION}")
+ if (USE_GLFW)
+ set_target_properties(GLFWRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
+ SUFFIX "${PYTHON_MODULE_EXTENSION}")
+ endif ()
+endif()
+
+set_target_properties(tinyobjloader PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
+ SUFFIX "${PYTHON_MODULE_EXTENSION}")
+
+if (NOT USE_VR)
+ add_executable(query_devices glad/egl.cpp glad/gl.cpp cpp/query_devices.cpp)
+ add_executable(test_device glad/egl.cpp glad/gl.cpp cpp/test_device.cpp)
+ if (USE_GLAD)
+ target_link_libraries(query_devices dl pthread)
+ target_link_libraries(test_device dl pthread)
+ else ()
+ target_link_libraries(query_devices dl pthread EGL ${OPENGL_LIBRARIES})
+ target_link_libraries(test_device dl pthread EGL ${OPENGL_LIBRARIES})
+ endif ()
endif()
diff --git a/igibson/render/cpp/glfw_mesh_renderer.cpp b/igibson/render/cpp/glfw_mesh_renderer.cpp
index 2e2392a92..8b78a7597 100644
--- a/igibson/render/cpp/glfw_mesh_renderer.cpp
+++ b/igibson/render/cpp/glfw_mesh_renderer.cpp
@@ -69,32 +69,6 @@ int GLFWRendererContext::init() {
exit(EXIT_FAILURE);
}
- // Our codebase assumes that window size is equal to the framebuffer size.
- // The change below was to maintain this assumption for high DPI displays.
- int framebufferWidth;
- int framebufferHeight;
- glfwGetFramebufferSize(this->window, &framebufferWidth, &framebufferHeight);
-
- int scaling_factor = framebufferWidth / m_windowWidth;
-
- // This accounts for retina display. This display uses double-sized pixels. We need to create a window that is
- // half the size to accomodate for that
- if(framebufferWidth!=m_windowWidth)
- {
- glfwDestroyWindow(this->window);
- if (m_fullscreen) {
- this->window = glfwCreateWindow(m_windowWidth/scaling_factor, m_windowHeight/scaling_factor, "Gibson Renderer Output", glfwGetPrimaryMonitor(), NULL);
- }
- else {
- this->window = glfwCreateWindow(m_windowWidth/scaling_factor, m_windowHeight/scaling_factor, "Gibson Renderer Output", NULL, NULL);
- }
-
- if (this->window == NULL) {
- fprintf(stderr, "ERROR: Failed to create GLFW window.\n");
- exit(EXIT_FAILURE);
- }
- }
-
glfwMakeContextCurrent(this->window);
glfwSwapInterval(0);
diff --git a/igibson/render/cpp/glfw_mesh_renderer.h b/igibson/render/cpp/glfw_mesh_renderer.h
index cff58aabe..3fbf4b0dd 100644
--- a/igibson/render/cpp/glfw_mesh_renderer.h
+++ b/igibson/render/cpp/glfw_mesh_renderer.h
@@ -2,7 +2,9 @@
#define GLFW_MESH_RENDERER_HEADER
#include "mesh_renderer.h"
+#ifdef WIN32
#include
+#endif
class GLFWRendererContext: public MeshRendererContext {
diff --git a/igibson/render/cpp/mesh_renderer.cpp b/igibson/render/cpp/mesh_renderer.cpp
index af7634867..8c946cb48 100644
--- a/igibson/render/cpp/mesh_renderer.cpp
+++ b/igibson/render/cpp/mesh_renderer.cpp
@@ -77,7 +77,7 @@ using CryptoPP::byte;
class Image {
public:
 static std::shared_ptr<Image> fromFile(const std::string &filename, int channels) {
- std::printf("Loading image: %s\n", filename.c_str());
+ // std::printf("Loading image: %s\n", filename.c_str());
stbi_set_flip_vertically_on_load(false);
 std::shared_ptr<Image> image{new Image};
@@ -325,7 +325,7 @@ py::list MeshRendererContext::setup_framebuffer_meshrenderer(int width, int heig
glBindTexture(GL_TEXTURE_2D, color_tex_normal);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindTexture(GL_TEXTURE_2D, color_tex_semantics);
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindTexture(GL_TEXTURE_2D, color_tex_ins_seg);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindTexture(GL_TEXTURE_2D, color_tex_3d);
@@ -388,7 +388,7 @@ py::list MeshRendererContext::setup_framebuffer_meshrenderer_ms(int width, int h
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_tex_normal);
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA, width, height, GL_TRUE);
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_tex_semantics);
- glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA32F, width, height, GL_TRUE);
+ glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA, width, height, GL_TRUE);
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_tex_ins_seg);
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA32F, width, height, GL_TRUE);
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_tex_3d);
@@ -1007,7 +1007,7 @@ void MeshRendererContext::setup_pbr(std::string shader_path,
glFinish();
- std::cout << "INFO: compiled pbr shaders" << std::endl;
+ // std::cout << "INFO: compiled pbr shaders" << std::endl;
}
 GLuint MeshRendererContext::linkProgram(std::initializer_list<GLuint> shaders) {
@@ -1056,7 +1056,7 @@ GLuint MeshRendererContext::compileShader(const std::string &filename, GLenum ty
}
const GLchar *srcBufferPtr = src.c_str();
- std::printf("Compiling GLSL shader: %s\n", filename.c_str());
+ // std::printf("Compiling GLSL shader: %s\n", filename.c_str());
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, &srcBufferPtr, nullptr);
@@ -1149,10 +1149,10 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil
std::vector texWidths;
std::vector texChannels;
std::vector buffer;
- printf("number of textures %d\n", num_textures);
+ // printf("number of textures %d\n", num_textures);
for (int i = 0; i < num_textures; i++) {
std::string filename = filenames[i];
- std::cout << "Filename is: " << filename << std::endl;
+ // std::cout << "Filename is: " << filename << std::endl;
int w;
int h;
int comp;
@@ -1166,7 +1166,7 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil
// force to 3 channels
if (image == nullptr)
throw(std::string("Failed to load texture"));
- std::cout << "Size is w: " << w << " by h: " << h << std::endl;
+ // std::cout << "Size is w: " << w << " by h: " << h << std::endl;
comp = 3;
image_data.push_back(image);
texHeights.push_back(h);
@@ -1213,7 +1213,7 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil
std::string tex_filename = filenames[i];
bool contains_keyword = tex_filename.find("floor") != std::string::npos || tex_filename.find("wall") != std::string::npos || tex_filename.find("ceiling") != std::string::npos;
if (score >= texCutoff || contains_keyword) {
- std::cout << "Appending texture with name: " << tex_filename << " to large bucket" << std::endl;
+ // std::cout << "Appending texture with name: " << tex_filename << " to large bucket" << std::endl;
texIndices[0].push_back(i);
tex_info_i.append(0);
tex_info_i.append(firstTexLayerNum);
@@ -1222,7 +1222,7 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil
firstTexLayerNum++;
}
else {
- std::cout << "Appending texture with name: " << tex_filename << " to small bucket" << std::endl;
+ // std::cout << "Appending texture with name: " << tex_filename << " to small bucket" << std::endl;
texIndices[1].push_back(i);
tex_info_i.append(1);
tex_info_i.append(secondTexLayerNum);
@@ -1234,13 +1234,13 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil
texLayerData.append(tex_info_i);
}
- printf("Texture 1 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", texLayerDims[0], texLayerDims[1], firstTexLayerNum, texId1);
- if (shouldShrinkSmallTextures) {
- printf("Texture 2 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", smallTexBucketSize, smallTexBucketSize, secondTexLayerNum, texId2);
- }
- else {
- printf("Texture 2 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", texLayerDims[2], texLayerDims[3], secondTexLayerNum, texId2);
- }
+ // printf("Texture 1 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", texLayerDims[0], texLayerDims[1], firstTexLayerNum, texId1);
+ // if (shouldShrinkSmallTextures) {
+ // printf("Texture 2 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", smallTexBucketSize, smallTexBucketSize, secondTexLayerNum, texId2);
+ // }
+ // else {
+ // printf("Texture 2 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", texLayerDims[2], texLayerDims[3], secondTexLayerNum, texId2);
+ // }
for (int i = 0; i < 2; i++) {
GLuint currTexId = texId1;
@@ -1253,8 +1253,8 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil
GLint max_layers, max_size;
glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &max_layers);
glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, &max_size);
- printf("Max layer number: %d\n", max_layers);
- printf("Max texture size: %d\n", max_size);
+ // printf("Max layer number: %d\n", max_layers);
+ // printf("Max texture size: %d\n", max_size);
}
int layerNum = firstTexLayerNum;
@@ -1442,7 +1442,7 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil
glUniform1f(glGetUniformLocation(shaderProgram, "use_two_light_probe"), (float)m_use_two_light_probe);
glUniform1f(glGetUniformLocation(shaderProgram, "blend_highlight"), (float)blend_highlight);
- printf("multidrawcount %d\n", multidrawCount);
+ // printf("multidrawcount %d\n", multidrawCount);
glGenBuffers(1, &uboTexColorData);
glBindBuffer(GL_UNIFORM_BUFFER, uboTexColorData);
@@ -2005,7 +2005,7 @@ void MeshRendererContext::postRenderText() {
// Restore previous FBO if one was bound
// This allows us to render text both to the screen and to a texture
- if (this->restore_prev_FBO) {
+ if (this->restore_prev_FBO == true) {
glBindFramebuffer(GL_FRAMEBUFFER, this->m_prevFBO);
}
}
diff --git a/igibson/render/cpp/mesh_renderer.h b/igibson/render/cpp/mesh_renderer.h
index f74b2a1c9..52bfe4471 100644
--- a/igibson/render/cpp/mesh_renderer.h
+++ b/igibson/render/cpp/mesh_renderer.h
@@ -89,7 +89,7 @@ class MeshRendererContext {
cudaGraphicsResource* cuda_res[MAX_NUM_RESOURCES];
#endif
- void init() {};
+ int init() { return 0; };
void release() {};
@@ -254,4 +254,4 @@ class MeshRendererContext {
};
-#endif
+#endif
\ No newline at end of file
diff --git a/igibson/render/cpp/tinyobjloader/sample.py b/igibson/render/cpp/tinyobjloader/sample.py
index b681e3801..035ba2ae3 100644
--- a/igibson/render/cpp/tinyobjloader/sample.py
+++ b/igibson/render/cpp/tinyobjloader/sample.py
@@ -1,8 +1,7 @@
import sys
-
import tinyobjloader
-filename = "../models/cornell_box.obj"
+filename = "../models/cornell_box.obj";
reader = tinyobjloader.ObjReader()
@@ -60,8 +59,8 @@
m.diffuse = [1, 2, 3]
print(m.diffuse)
- # print(m.shininess)
- # print(m.illum)
+ #print(m.shininess)
+ #print(m.illum)
shapes = reader.GetShapes()
print("Num shapes: ", len(shapes))
diff --git a/igibson/render/cpp/vr_mesh_renderer.cpp b/igibson/render/cpp/vr_mesh_renderer.cpp
index 214a9830e..88bfd4094 100644
--- a/igibson/render/cpp/vr_mesh_renderer.cpp
+++ b/igibson/render/cpp/vr_mesh_renderer.cpp
@@ -1,10 +1,7 @@
#include "vr_mesh_renderer.h"
-
-#ifdef WIN32
#include "SRanipal.h"
#include "SRanipal_Eye.h"
#include "SRanipal_Enums.h"
-#endif
#include
#include
@@ -162,7 +159,7 @@ py::list VRRendererContext::getDeviceCoordinateSystem(char* device) {
// TIMELINE: Call after getDataForVRDevice, since this relies on knowing latest HMD transform
py::list VRRendererContext::getEyeTrackingData() {
py::list eyeData;
- #ifdef WIN32
+
// Transform data into Gibson coordinate system before returning to user
glm::vec3 gibOrigin(vrToGib * glm::vec4(eyeTrackingData.origin, 1.0));
glm::vec3 gibDir(vrToGib * glm::vec4(eyeTrackingData.dir, 1.0));
@@ -188,27 +185,8 @@ py::list VRRendererContext::getEyeTrackingData() {
eyeData.append(dir);
eyeData.append(eyeTrackingData.leftPupilDiameter);
eyeData.append(eyeTrackingData.rightPupilDiameter);
- // Return dummy data with false validity if eye tracking is not enabled (on non-Windows system)
- #else
- py::list dummy_origin, dummy_dir;
- float dummy_diameter_l, dummy_diameter_r;
- eyeData.append(false);
- eyeData.append(dummy_origin);
- eyeData.append(dummy_dir);
- eyeData.append(dummy_diameter_l);
- eyeData.append(dummy_diameter_r);
- #endif
- return eyeData;
-}
-// Returns whether the current VR system supports eye tracking
-bool VRRendererContext::hasEyeTrackingSupport() {
- #ifdef WIN32
- return ViveSR::anipal::Eye::IsViveProEye();
- // Non-windows OS always have eye tracking disabled
- #else
- return false;
- #endif
+ return eyeData;
}
// Gets the VR offset vector in form x, y, z
@@ -224,6 +202,11 @@ py::list VRRendererContext::getVROffset() {
return offset;
}
+// Returns whether the current VR system supports eye tracking
+bool VRRendererContext::hasEyeTrackingSupport() {
+ return ViveSR::anipal::Eye::IsViveProEye();
+}
+
// Initialize the VR system and compositor
// TIMELINE: Call during init of renderer, before height/width are set
void VRRendererContext::initVR(bool useEyeTracking) {
@@ -255,15 +238,12 @@ void VRRendererContext::initVR(bool useEyeTracking) {
// No VR system offset by default
vrOffsetVec = glm::vec3(0, 0, 0);
- // Only activate eye tracking on Windows
- #ifdef WIN32
// Set eye tracking boolean
this->useEyeTracking = useEyeTracking;
if (useEyeTracking) {
shouldShutDownEyeTracking = false;
initAnipal();
}
- #endif
}
// Polls for VR events, such as button presses
@@ -373,12 +353,10 @@ void VRRendererContext::releaseVR() {
vr::VR_Shutdown();
m_pHMD = NULL;
- #ifdef WIN32
if (this->useEyeTracking) {
this->shouldShutDownEyeTracking = true;
eyeTrackingThread->join();
}
- #endif
}
// Sets the offset of the VR headset
@@ -432,8 +410,8 @@ void VRRendererContext::updateVRData() {
hmdData.index = idx;
hmdData.isValidData = true;
hmdActualPos = getPositionFromSteamVRMatrix(transformMat);
- glm::vec3 steamVrMatrixPos = hmdActualPos + vrOffsetVec;
- setSteamVRMatrixPos(steamVrMatrixPos, transformMat);
+
+ setSteamVRMatrixPos(hmdActualPos + vrOffsetVec, transformMat);
hmdData.deviceTransform = convertSteamVRMatrixToGlmMat4(transformMat);
hmdData.devicePos = getPositionFromSteamVRMatrix(transformMat);
@@ -469,8 +447,7 @@ void VRRendererContext::updateVRData() {
leftControllerData.isValidData = getControllerDataResult;
glm::vec3 leftControllerPos = getPositionFromSteamVRMatrix(transformMat);
- glm::vec3 steamVrMatrixPos = leftControllerPos + vrOffsetVec;
- setSteamVRMatrixPos(steamVrMatrixPos, transformMat);
+ setSteamVRMatrixPos(leftControllerPos + vrOffsetVec, transformMat);
leftControllerData.deviceTransform = convertSteamVRMatrixToGlmMat4(transformMat);
leftControllerData.devicePos = getPositionFromSteamVRMatrix(transformMat);
@@ -486,9 +463,7 @@ void VRRendererContext::updateVRData() {
rightControllerData.isValidData = getControllerDataResult;
glm::vec3 rightControllerPos = getPositionFromSteamVRMatrix(transformMat);
- glm::vec3 steamVrMatrixPos = rightControllerPos + vrOffsetVec;
-
- setSteamVRMatrixPos(steamVrMatrixPos, transformMat);
+ setSteamVRMatrixPos(rightControllerPos + vrOffsetVec, transformMat);
rightControllerData.deviceTransform = convertSteamVRMatrixToGlmMat4(transformMat);
rightControllerData.devicePos = getPositionFromSteamVRMatrix(transformMat);
@@ -506,8 +481,7 @@ void VRRendererContext::updateVRData() {
// Apply VR offset to tracker position
glm::vec3 trackerPos = getPositionFromSteamVRMatrix(transformMat);
- glm::vec3 steamVrMatrixPos = trackerPos + vrOffsetVec;
- setSteamVRMatrixPos(steamVrMatrixPos, transformMat);
+ setSteamVRMatrixPos(trackerPos + vrOffsetVec, transformMat);
if (this->trackerNamesToData.find(serial) != this->trackerNamesToData.end()) {
this->trackerNamesToData[serial].index = idx;
@@ -667,7 +641,6 @@ glm::vec3 VRRendererContext::getVec3ColFromMat4(int col_index, glm::mat4& mat) {
return v;
}
-#ifdef WIN32
// Initializes the SRAnipal runtime, if the user selects this option
void VRRendererContext::initAnipal() {
if (!ViveSR::anipal::Eye::IsViveProEye()) {
@@ -747,7 +720,6 @@ void VRRendererContext::pollAnipal() {
}
}
}
-#endif
// Print string version of mat4 for debugging purposes
void VRRendererContext::printMat4(glm::mat4& m) {
@@ -901,8 +873,8 @@ PYBIND11_MODULE(VRRendererContext, m) {
pymodule.def("getDataForVRTracker", &VRRendererContext::getDataForVRTracker);
pymodule.def("getDeviceCoordinateSystem", &VRRendererContext::getDeviceCoordinateSystem);
pymodule.def("getEyeTrackingData", &VRRendererContext::getEyeTrackingData);
- pymodule.def("hasEyeTrackingSupport", &VRRendererContext::hasEyeTrackingSupport);
pymodule.def("getVROffset", &VRRendererContext::getVROffset);
+ pymodule.def("hasEyeTrackingSupport", &VRRendererContext::hasEyeTrackingSupport);
pymodule.def("initVR", &VRRendererContext::initVR);
pymodule.def("pollVREvents", &VRRendererContext::pollVREvents);
pymodule.def("postRenderVRForEye", &VRRendererContext::postRenderVRForEye);
diff --git a/igibson/render/cpp/vr_mesh_renderer.h b/igibson/render/cpp/vr_mesh_renderer.h
index 2e1f7035e..f0ea2761b 100644
--- a/igibson/render/cpp/vr_mesh_renderer.h
+++ b/igibson/render/cpp/vr_mesh_renderer.h
@@ -2,11 +2,9 @@
#define VR_MESH_RENDERER_HEADER
#include "glfw_mesh_renderer.h"
-#ifdef WIN32
#include "SRanipal.h"
#include "SRanipal_Eye.h"
#include "SRanipal_Enums.h"
-#endif
#include
@@ -77,16 +75,12 @@ class VRRendererContext : public GLFWRendererContext {
glm::mat4 gibToVr;
glm::mat4 vrToGib;
- bool useEyeTracking;
- bool shouldShutDownEyeTracking;
-
- #ifdef WIN32
// SRAnipal variables
+ bool useEyeTracking;
std::thread* eyeTrackingThread;
ViveSR::anipal::Eye::EyeData eyeData;
- #endif
-
int result;
+ bool shouldShutDownEyeTracking;
// Struct storing eye data for SR anipal - we only return origin and direction in world space
// As most users will want to use this ray to query intersection or something similar
diff --git a/igibson/render/mesh_renderer/get_available_devices.py b/igibson/render/mesh_renderer/get_available_devices.py
index f5410797e..c50ae121f 100644
--- a/igibson/render/mesh_renderer/get_available_devices.py
+++ b/igibson/render/mesh_renderer/get_available_devices.py
@@ -16,28 +16,22 @@ def get_available_devices():
try:
num_devices = int(subprocess.check_output(["{}/query_devices".format(executable_path)]))
except subprocess.CalledProcessError as e:
- return [0], {0: {}}
+ return [0]
FNULL = open(os.devnull, "w")
- device_list = []
- device_info = {}
+ available_devices = []
for i in range(num_devices):
try:
if b"NVIDIA" in subprocess.check_output(["{}/test_device".format(executable_path), str(i)], stderr=FNULL):
- device_list.append(i)
- device_info[i] = {"device_type": "nvidia"}
- logging.info("Device {} is available for rendering".format(i))
- elif b"Intel" in subprocess.check_output(["{}/test_device".format(executable_path), str(i)], stderr=FNULL):
- device_list.append(i)
- device_info[i] = {"device_type": "intel"}
+ available_devices.append(i)
logging.info("Device {} is available for rendering".format(i))
except subprocess.CalledProcessError as e:
logging.info(e)
logging.info("Device {} is not available for rendering".format(i))
FNULL.close()
- return device_list, device_info
+ return available_devices
def get_cuda_device(minor_idx):
@@ -69,6 +63,6 @@ def get_cuda_device(minor_idx):
if __name__ == "__main__":
- graphics_devices, device_info = get_available_devices()
+ graphics_devices = get_available_devices()
logging.info("Graphics Devices: {}".format(graphics_devices))
logging.info("Graphics Device Ids: {}".format([get_cuda_device(item) for item in graphics_devices]))
diff --git a/igibson/render/mesh_renderer/instances.py b/igibson/render/mesh_renderer/instances.py
index d36189f52..cecf7e7fe 100644
--- a/igibson/render/mesh_renderer/instances.py
+++ b/igibson/render/mesh_renderer/instances.py
@@ -18,12 +18,11 @@ def __init__(
id,
link_ids,
pybullet_uuid,
- ig_object,
class_id,
poses_trans,
poses_rot,
dynamic,
- softbody,
+ robot=None,
use_pbr=True,
use_pbr_mapping=True,
shadow_caster=True,
@@ -37,7 +36,7 @@ def __init__(
:param poses_trans: initial translations for each visual object
:param poses_rot: initial rotation matrix for each visual object
:param dynamic: whether the instance group is dynamic
- :param softbody: whether the instance group is for a soft body
+ :param robot: The robot associated with this InstanceGroup
:param use_pbr: whether to use PBR
:param use_pbr_mapping: whether to use PBR mapping
:param shadow_caster: whether to cast shadow
@@ -48,16 +47,15 @@ def __init__(
self.id = id
self.link_ids = link_ids
self.class_id = class_id
+ self.robot = robot
if len(objects) > 0:
self.renderer = objects[0].renderer
else:
self.renderer = None
self.pybullet_uuid = pybullet_uuid
- self.ig_object = ig_object
self.dynamic = dynamic
self.tf_tree = None
- self.softbody = softbody
self.use_pbr = use_pbr
self.use_pbr_mapping = use_pbr_mapping
self.shadow_caster = shadow_caster
@@ -101,33 +99,6 @@ def render(self, shadow_pass=0):
for i, visual_object in enumerate(self.objects):
for object_idx in visual_object.VAO_ids:
- if self.softbody:
- # construct new vertex position into shape format
- vertices = p.getMeshData(self.pybullet_uuid)[1]
- vertices_flattened = [item for sublist in vertices for item in sublist]
- vertex_position = np.array(vertices_flattened).reshape((len(vertices_flattened) // 3, 3))
- shape = self.renderer.shapes[object_idx]
- n_indices = len(shape.mesh.indices)
- np_indices = shape.mesh.numpy_indices().reshape((n_indices, 3))
- shape_vertex_index = np_indices[:, 0]
- shape_vertex = vertex_position[shape_vertex_index]
-
- # update new vertex position in buffer data
- new_data = self.renderer.vertex_data[object_idx]
- new_data[:, 0 : shape_vertex.shape[1]] = shape_vertex
- new_data = new_data.astype(np.float32)
-
- # transform and rotation already included in mesh data
- self.pose_trans = np.eye(4)
- self.pose_rot = np.eye(4)
- self.last_trans = np.eye(4)
- self.last_rot = np.eye(4)
-
- # update buffer data into VBO
- self.renderer.r.render_softbody_instance(
- self.renderer.VAOs[object_idx], self.renderer.VBOs[object_idx], new_data
- )
-
self.renderer.r.init_pos_instance(
self.renderer.shaderProgram,
self.poses_trans[i],
@@ -261,3 +232,239 @@ def __str__(self):
def __repr__(self):
return self.__str__()
+
+
+class Robot(InstanceGroup):
+ """
+ A special type of InstanceGroup used for robots
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(Robot, self).__init__(*args, **kwargs)
+
+ def __str__(self):
+ return "Robot({}) -> Objects({})".format(self.id, ",".join([str(object.id) for object in self.objects]))
+
+
+class Instance(object):
+ """
+ Instance is one instance of a visual object.
+ One visual object can have multiple instances to save memory.
+ """
+
+ def __init__(
+ self,
+ object,
+ id,
+ pybullet_uuid,
+ class_id,
+ pose_trans,
+ pose_rot,
+ dynamic,
+ softbody,
+ use_pbr=True,
+ use_pbr_mapping=True,
+ shadow_caster=True,
+ parent_body=None,
+ ):
+ """
+ :param object: visual object
+ :param id: id of this instance
+ :param pybullet_uuid: body id in pybullet
+ :param class_id: class_id to render semantics
+ :param pose_trans: initial translations for the visual object
+ :param pose_rot: initial rotation matrix for the visual object
+ :param dynamic: whether the instance is dynamic
+ :param softbody: whether the instance is soft body
+ :param use_pbr: whether to use PBR
+ :param use_pbr_mapping: whether to use PBR mapping
+ :param shadow_caster: whether to cast shadow
+ :param parent_body: parent body name of current xml element (MuJoCo XML)
+ """
+ self.object = object
+ self.pose_trans = pose_trans
+ self.pose_rot = pose_rot
+ self.id = id
+ self.class_id = class_id
+ self.renderer = object.renderer
+ self.pybullet_uuid = pybullet_uuid
+ self.dynamic = dynamic
+ self.softbody = softbody
+ self.use_pbr = use_pbr
+ self.use_pbr_mapping = use_pbr_mapping
+ self.shadow_caster = shadow_caster
+ self.roughness = 1
+ self.metalness = 0
+ # Determines whether object will be rendered
+ self.hidden = False
+ # Indices into optimized buffers such as color information and transformation buffer
+ # These values are used to set buffer information during simulation
+ self.or_buffer_indices = None
+ self.last_trans = np.copy(pose_trans)
+ self.last_rot = np.copy(pose_rot)
+ self.parent_body = parent_body
+ self.highlight = False
+
+ def set_highlight(self, highlight):
+ self.highlight = highlight
+
+ def render(self, shadow_pass=0):
+ """
+ Render this instance
+ shadow_pass = 0: normal rendering mode, disable shadow
+ shadow_pass = 1: enable_shadow, rendering depth map from light space
+ shadow_pass = 2: use rendered depth map to calculate shadow
+
+ :param shadow_pass: shadow pass mode
+ """
+ if self.renderer is None:
+ return
+
+ # softbody: reload vertex position
+ if self.softbody:
+ # construct new vertex position into shape format
+ object_idx = self.object.VAO_ids[0]
+ vertices = p.getMeshData(self.pybullet_uuid)[1]
+ vertices_flattened = [item for sublist in vertices for item in sublist]
+ vertex_position = np.array(vertices_flattened).reshape((len(vertices_flattened) // 3, 3))
+ shape = self.renderer.shapes[object_idx]
+ n_indices = len(shape.mesh.indices)
+ np_indices = shape.mesh.numpy_indices().reshape((n_indices, 3))
+ shape_vertex_index = np_indices[:, 0]
+ shape_vertex = vertex_position[shape_vertex_index]
+
+ # update new vertex position in buffer data
+ new_data = self.renderer.vertex_data[object_idx]
+ new_data[:, 0 : shape_vertex.shape[1]] = shape_vertex
+ new_data = new_data.astype(np.float32)
+
+ # transform and rotation already included in mesh data
+ self.pose_trans = np.eye(4)
+ self.pose_rot = np.eye(4)
+ self.last_trans = np.eye(4)
+ self.last_rot = np.eye(4)
+
+ # update buffer data into VBO
+ self.renderer.r.render_softbody_instance(
+ self.renderer.VAOs[object_idx], self.renderer.VBOs[object_idx], new_data
+ )
+
+ self.renderer.r.initvar(
+ self.renderer.shaderProgram,
+ self.renderer.V,
+ self.renderer.last_V,
+ self.renderer.lightV,
+ shadow_pass,
+ self.renderer.P,
+ self.renderer.lightP,
+ self.renderer.camera,
+ self.renderer.lightpos,
+ self.renderer.lightcolor,
+ )
+
+ self.renderer.r.init_pos_instance(
+ self.renderer.shaderProgram, self.pose_trans, self.pose_rot, self.last_trans, self.last_rot
+ )
+
+ for object_idx in self.object.VAO_ids:
+ current_material = self.renderer.material_idx_to_material_instance_mapping[
+ self.renderer.shape_material_idx[object_idx]
+ ]
+ self.renderer.r.init_material_instance(
+ self.renderer.shaderProgram,
+ float(self.class_id) / MAX_CLASS_COUNT,
+ float(self.id) / MAX_INSTANCE_COUNT,
+ current_material.kd,
+ float(current_material.is_texture()),
+ float(self.use_pbr),
+ float(self.use_pbr_mapping),
+ float(self.metalness),
+ float(self.roughness),
+ current_material.transform_param,
+ )
+ try:
+
+ texture_id = current_material.texture_id
+ metallic_texture_id = current_material.metallic_texture_id
+ roughness_texture_id = current_material.roughness_texture_id
+ normal_texture_id = current_material.normal_texture_id
+
+ if texture_id is None:
+ texture_id = -1
+ if metallic_texture_id is None:
+ metallic_texture_id = -1
+ if roughness_texture_id is None:
+ roughness_texture_id = -1
+ if normal_texture_id is None:
+ normal_texture_id = -1
+
+ if self.renderer.msaa:
+ buffer = self.renderer.fbo_ms
+ else:
+ buffer = self.renderer.fbo
+
+ self.renderer.r.draw_elements_instance(
+ self.renderer.material_idx_to_material_instance_mapping[
+ self.renderer.shape_material_idx[object_idx]
+ ].is_texture(),
+ texture_id,
+ metallic_texture_id,
+ roughness_texture_id,
+ normal_texture_id,
+ self.renderer.depth_tex_shadow,
+ self.renderer.VAOs[object_idx],
+ self.renderer.faces[object_idx].size,
+ self.renderer.faces[object_idx],
+ buffer,
+ )
+ finally:
+ self.renderer.r.cglBindVertexArray(0)
+
+ self.renderer.r.cglUseProgram(0)
+
+ def get_pose_in_camera(self):
+ """
+ Get instance pose in camera reference frame
+ """
+ mat = self.renderer.V.dot(self.pose_trans.T).dot(self.pose_rot).T
+ pose = np.concatenate([mat2xyz(mat), safemat2quat(mat[:3, :3].T)])
+ return pose
+
+ def set_position(self, pos):
+ """
+ Set position
+
+ :param pos: position
+ """
+ self.last_trans = np.copy(self.pose_trans)
+ self.pose_trans = np.ascontiguousarray(xyz2mat(pos))
+
+ def set_rotation(self, rot):
+ """
+ Set orientation
+
+ :param rot: rotation matrix
+ """
+ self.last_rot = np.copy(self.pose_rot)
+ self.pose_rot = rot
+
+ def dump(self):
+ """
+ Dump vertex and face information
+ """
+ vertices_info = []
+ faces_info = []
+ for vertex_data_index, face_index in zip(self.object.vertex_data_indices, self.object.face_indices):
+ vertices_info.append(
+ transform_vertex(
+ self.renderer.vertex_data[vertex_data_index], pose_rot=self.pose_rot, pose_trans=self.pose_trans
+ )
+ )
+ faces_info.append(self.renderer.faces[face_index])
+ return vertices_info, faces_info
+
+ def __str__(self):
+ return "Instance({}) -> Object({})".format(self.id, self.object.id)
+
+ def __repr__(self):
+ return self.__str__()
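The pose bookkeeping above (set_position, set_rotation, get_pose_in_camera) composes a 4x4 translation and a 4x4 rotation with the renderer's view matrix V. A minimal sketch of that composition, assuming the mesh_util helpers behave exactly as they are used in the methods above:

    import numpy as np
    from igibson.utils.mesh_util import mat2xyz, safemat2quat, xyz2mat

    def pose_in_camera(V, position, rot_4x4):
        # Build the 4x4 translation the same way set_position() does,
        # then compose with the view matrix V as in get_pose_in_camera().
        pose_trans = np.ascontiguousarray(xyz2mat(position))
        mat = V.dot(pose_trans.T).dot(rot_4x4).T
        # xyz translation followed by a quaternion, matching get_pose_in_camera()
        return np.concatenate([mat2xyz(mat), safemat2quat(mat[:3, :3].T)])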
diff --git a/igibson/render/mesh_renderer/libcryptopp.8.6.dylib b/igibson/render/mesh_renderer/libcryptopp.8.6.dylib
new file mode 100755
index 000000000..737bf283c
Binary files /dev/null and b/igibson/render/mesh_renderer/libcryptopp.8.6.dylib differ
diff --git a/igibson/render/mesh_renderer/libcryptopp.dylib b/igibson/render/mesh_renderer/libcryptopp.dylib
new file mode 120000
index 000000000..e2b0a542d
--- /dev/null
+++ b/igibson/render/mesh_renderer/libcryptopp.dylib
@@ -0,0 +1 @@
+libcryptopp.8.6.dylib
\ No newline at end of file
diff --git a/igibson/render/mesh_renderer/mesh_renderer_cpu.py b/igibson/render/mesh_renderer/mesh_renderer_cpu.py
index f63dfdd58..e7e894431 100644
--- a/igibson/render/mesh_renderer/mesh_renderer_cpu.py
+++ b/igibson/render/mesh_renderer/mesh_renderer_cpu.py
@@ -12,18 +12,16 @@
import igibson.render.mesh_renderer as mesh_renderer
from igibson.render.mesh_renderer import tinyobjloader
from igibson.render.mesh_renderer.get_available_devices import get_available_devices
-from igibson.render.mesh_renderer.instances import InstanceGroup
-from igibson.render.mesh_renderer.materials import Material, ProceduralMaterial
+from igibson.render.mesh_renderer.instances import Instance, InstanceGroup, Robot
+from igibson.render.mesh_renderer.materials import Material, ProceduralMaterial, RandomizedMaterial
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.render.mesh_renderer.text import Text, TextManager
from igibson.render.mesh_renderer.visual_object import VisualObject
from igibson.robots.behavior_robot import BehaviorRobot
-from igibson.robots.robot_base import BaseRobot
from igibson.utils.constants import AVAILABLE_MODALITIES, MAX_CLASS_COUNT, MAX_INSTANCE_COUNT, ShadowPass
from igibson.utils.mesh_util import lookat, mat2xyz, ortho, perspective, quat2rotmat, safemat2quat, xyz2mat, xyzw2wxyz
Image.MAX_IMAGE_PIXELS = None
-NO_MATERIAL_DEFINED_IN_SHAPE_AND_NO_OVERWRITE_SUPPLIED = -1
class MeshRenderer(object):
@@ -47,8 +45,8 @@ def __init__(
:param height: width of the renderer output
:param vertical_fov: vertical field of view for the renderer
:param device_idx: which GPU to run the renderer on
- :param rendering_settings: rendering settings
- :param simulator: simulator object
+ :param rendering_settings: rendering settings
+ :param simulator: Simulator object.
"""
self.simulator = simulator
self.rendering_settings = rendering_settings
@@ -69,7 +67,6 @@ def __init__(
self.height = height
self.faces = []
self.instances = []
- self.update_instance_id_to_pb_id_map()
self.fisheye = rendering_settings.use_fisheye
self.optimized = rendering_settings.optimized
self.texture_files = {}
@@ -85,13 +82,47 @@ def __init__(
self.text_manager = TextManager(self)
self.texts = []
- self.msaa = rendering_settings.msaa
+ device = None
+ """
+ device_idx is the major id
+ device is the minor id
+ you can get it from nvidia-smi -a
+
+ The minor number for the device is such that the Nvidia device node file for each GPU will have the form
+ /dev/nvidia[minor number]. Available only on the Linux platform.
+
+ TODO: add device management for the Windows platform.
+ """
+
+ if os.environ.get("GIBSON_DEVICE_ID", None):
+ device = int(os.environ.get("GIBSON_DEVICE_ID"))
+ logging.info(
+ "GIBSON_DEVICE_ID environment variable has been manually set. "
+ "Using device {} for rendering".format(device)
+ )
+ else:
+ if self.platform != "Windows":
+ available_devices = get_available_devices()
+ if device_idx < len(available_devices):
+ device = available_devices[device_idx]
+ logging.info("Using device {} for rendering".format(device))
+ else:
+ logging.info("Device index is larger than number of devices, falling back to use 0")
+ logging.info(
+ "If you have trouble using EGL, please visit our trouble shooting guide",
+ "at http://svl.stanford.edu/igibson/docs/issues.html",
+ )
+
+ device = 0
+ self.device_idx = device_idx
+ self.device_minor = device
+ self.msaa = rendering_settings.msaa
+ if self.platform == "Darwin" and self.optimized:
+ logging.error("Optimized renderer is not supported on Mac")
+ exit()
if self.platform == "Darwin":
- if self.optimized:
- logging.error("Optimized renderer is not supported on Mac")
- exit()
- from igibson.render.mesh_renderer import GLFWRendererContext # type: ignore
+ from igibson.render.mesh_renderer import GLFWRendererContext
self.r = GLFWRendererContext.GLFWRendererContext(
width,
@@ -101,9 +132,8 @@ def __init__(
self.rendering_settings.show_glfw_window,
rendering_settings.fullscreen,
)
-
- elif self.platform == "Windows" or self.__class__.__name__ == "MeshRendererVR":
- from igibson.render.mesh_renderer import VRRendererContext # type: ignore
+ elif self.platform == "Windows":
+ from igibson.render.mesh_renderer import VRRendererContext
self.r = VRRendererContext.VRRendererContext(
width,
@@ -113,47 +143,10 @@ def __init__(
self.rendering_settings.show_glfw_window,
rendering_settings.fullscreen,
)
-
- elif self.platform == "Linux":
+ else:
from igibson.render.mesh_renderer import EGLRendererContext
- """
- device_idx is the major id
- device is the minor id
- you can get it from nvidia-smi -a
-
- The minor number for the device is such that the Nvidia device node file for each GPU will have the form
- /dev/nvidia[minor number]. Available only on Linux platform.
-
- """
-
- device = os.environ.get("GIBSON_DEVICE_ID", None)
- if device:
- device = int(device)
- logging.info(
- "GIBSON_DEVICE_ID environment variable has been manually set. "
- "Using device {} for rendering".format(device)
- )
- else:
- available_devices, _ = get_available_devices()
- if device_idx < len(available_devices):
- device = available_devices[device_idx]
- logging.info("Using device {} for rendering".format(device))
- else:
- logging.info("Device index is larger than number of devices, falling back to use 0")
- logging.info(
- "If you have trouble using EGL, please visit our trouble shooting guide"
- "at http://svl.stanford.edu/igibson/docs/issues.html",
- )
-
- device = 0
-
- self.device_idx = device_idx
- self.device_minor = device
-
self.r = EGLRendererContext.EGLRendererContext(width, height, device)
- else:
- raise Exception("Unsupported platform and renderer combination")
self.r.init()
@@ -167,30 +160,91 @@ def __init__(
logging.debug("Is using fisheye camera: {}".format(self.fisheye))
- if self.rendering_settings.glsl_version_override:
- glsl_version = str(self.rendering_settings.glsl_version_override)
- shader_available = glsl_version in ["450", "460"]
- assert shader_available, "Error: only GLSL version 450 and 460 shaders are supported"
- else:
- glsl_version = "460"
-
if self.fisheye:
logging.error("Fisheye is currently not supported.")
exit(1)
else:
if self.platform == "Darwin":
- self.shaderProgram = self.get_shader_program("410", "vert.shader", "frag.shader")
- self.textShaderProgram = self.get_shader_program("410", "text_vert.shader", "text_frag.shader")
+ self.shaderProgram = self.r.compile_shader_meshrenderer(
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "vert.shader")
+ ).readlines()
+ ),
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "frag.shader")
+ ).readlines()
+ ),
+ )
+ self.textShaderProgram = self.r.compile_shader_meshrenderer(
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "text_vert.shader")
+ ).readlines()
+ ),
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "text_frag.shader")
+ ).readlines()
+ ),
+ )
else:
if self.optimized:
- self.shaderProgram = self.get_shader_program(
- glsl_version, "optimized_vert.shader", "optimized_frag.shader"
+ self.shaderProgram = self.r.compile_shader_meshrenderer(
+ "".join(
+ open(
+ os.path.join(
+ os.path.dirname(mesh_renderer.__file__), "shaders", "450", "optimized_vert.shader"
+ )
+ ).readlines()
+ ),
+ "".join(
+ open(
+ os.path.join(
+ os.path.dirname(mesh_renderer.__file__), "shaders", "450", "optimized_frag.shader"
+ )
+ ).readlines()
+ ),
)
else:
- self.shaderProgram = self.get_shader_program(glsl_version, "vert.shader", "frag.shader")
- self.textShaderProgram = self.get_shader_program(glsl_version, "text_vert.shader", "text_frag.shader")
+ self.shaderProgram = self.r.compile_shader_meshrenderer(
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450", "vert.shader")
+ ).readlines()
+ ),
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450", "frag.shader")
+ ).readlines()
+ ),
+ )
+ self.textShaderProgram = self.r.compile_shader_meshrenderer(
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450", "text_vert.shader")
+ ).readlines()
+ ),
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450", "text_frag.shader")
+ ).readlines()
+ ),
+ )
- self.skyboxShaderProgram = self.get_shader_program("410", "skybox_vs.glsl", "skybox_fs.glsl")
+ self.skyboxShaderProgram = self.r.compile_shader_meshrenderer(
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "skybox_vs.glsl")
+ ).readlines()
+ ),
+ "".join(
+ open(
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "skybox_fs.glsl")
+ ).readlines()
+ ),
+ )
# default light looking down and tilted
self.set_light_position_direction([0, 0, 2], [0, 0.5, 0])
@@ -226,40 +280,16 @@ def __init__(
self.skybox_size = rendering_settings.skybox_size
if not self.platform == "Darwin" and rendering_settings.enable_pbr:
- self.setup_pbr(glsl_version)
+ self.setup_pbr()
self.setup_lidar_param()
# Set up text FBO
self.text_manager.gen_text_fbo()
- def get_shader_program(self, glsl_version, vertex_source, fragment_source):
- """
- Get shader program.
-
- :param glsl_version: GLSL version
- :param vertex_source: vertex shader source
- :param fragment_source: fragment shader source
- :return: a program object to which vertex shader and fragment shader are attached
- """
- return self.r.compile_shader_meshrenderer(
- "".join(
- open(
- os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", glsl_version, vertex_source)
- ).readlines()
- ),
- "".join(
- open(
- os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", glsl_version, fragment_source)
- ).readlines()
- ),
- )
-
- def setup_pbr(self, glsl_version):
+ def setup_pbr(self):
"""
- Set up physics-based rendering.
-
- :param glsl_version: GLSL version
+ Set up physics-based rendering
"""
if (
os.path.exists(self.rendering_settings.env_texture_filename)
@@ -267,7 +297,7 @@ def setup_pbr(self, glsl_version):
or os.path.exists(self.rendering_settings.env_texture_filename3)
):
self.r.setup_pbr(
- os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", glsl_version),
+ os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450"),
self.rendering_settings.env_texture_filename,
self.rendering_settings.env_texture_filename2,
self.rendering_settings.env_texture_filename3,
@@ -281,7 +311,7 @@ def setup_pbr(self, glsl_version):
def set_light_position_direction(self, position, target):
"""
- Set light position and orientation.
+ Set light position and orientation
:param position: light position
:param target: light target
@@ -291,7 +321,7 @@ def set_light_position_direction(self, position, target):
def setup_framebuffer(self):
"""
- Set up framebuffers for the renderer.
+ Set up framebuffers for the renderer
"""
[
self.fbo,
@@ -320,13 +350,12 @@ def setup_framebuffer(self):
self.depth_tex_shadow = self.r.allocateTexture(self.width, self.height)
- def load_texture_file(self, tex_filename, texture_scale):
+ def load_texture_file(self, tex_filename):
"""
- Load the texture file into the renderer.
+ Load the texture file into the renderer
:param tex_filename: texture file filename
- :param texture_scale: a file-specific texture scale to be multiplied with the global scale in the settings
- :return: texture id of this texture in the renderer
+ :return texture_id: texture id of this texture in the renderer
"""
# if texture is None or does not exist, return None
if tex_filename is None or (not os.path.isfile(tex_filename)):
@@ -340,36 +369,32 @@ def load_texture_file(self, tex_filename, texture_scale):
# assume optimized renderer will have texture id starting from 0
texture_id = len(self.texture_files)
else:
- texture_id = self.r.loadTexture(
- tex_filename, texture_scale * self.rendering_settings.texture_scale, igibson.key_path
- )
+ texture_id = self.r.loadTexture(tex_filename, self.rendering_settings.texture_scale, igibson.key_path)
self.textures.append(texture_id)
self.texture_files[tex_filename] = texture_id
return texture_id
- def load_procedural_material(self, material, texture_scale):
+ def load_procedural_material(self, material):
material.lookup_or_create_transformed_texture()
has_encrypted_texture = os.path.exists(os.path.join(material.material_folder, "DIFFUSE.encrypted.png"))
suffix = ".encrypted.png" if has_encrypted_texture else ".png"
- material.texture_id = self.load_texture_file(
- os.path.join(material.material_folder, "DIFFUSE{}".format(suffix)), texture_scale
- )
+ material.texture_id = self.load_texture_file(os.path.join(material.material_folder, "DIFFUSE{}".format(suffix)))
material.metallic_texture_id = self.load_texture_file(
- os.path.join(material.material_folder, "METALLIC{}".format(suffix)), texture_scale
+ os.path.join(material.material_folder, "METALLIC{}".format(suffix))
)
material.roughness_texture_id = self.load_texture_file(
- os.path.join(material.material_folder, "ROUGHNESS{}".format(suffix)), texture_scale
+ os.path.join(material.material_folder, "ROUGHNESS{}".format(suffix))
)
material.normal_texture_id = self.load_texture_file(
- os.path.join(material.material_folder, "NORMAL{}".format(suffix)), texture_scale
+ os.path.join(material.material_folder, "NORMAL{}".format(suffix))
)
for state in material.states:
- transformed_diffuse_id = self.load_texture_file(material.texture_filenames[state], texture_scale)
+ transformed_diffuse_id = self.load_texture_file(material.texture_filenames[state])
material.texture_ids[state] = transformed_diffuse_id
material.default_texture_id = material.texture_id
- def load_randomized_material(self, material, texture_scale):
+ def load_randomized_material(self, material):
"""
Load all the texture files in the RandomizedMaterial.
Populate material_ids with the texture id assigned by the renderer.
@@ -386,7 +411,7 @@ def load_randomized_material(self, material, texture_scale):
for material_instance in material.material_files[material_class]:
material_id_instance = {}
for key in material_instance:
- material_id_instance[key] = self.load_texture_file(material_instance[key], texture_scale)
+ material_id_instance[key] = self.load_texture_file(material_instance[key])
material.material_ids[material_class].append(material_id_instance)
material.randomize()
@@ -398,6 +423,7 @@ def load_object(
transform_pos=None,
input_kd=None,
texture_scale=1.0,
+ load_texture=True,
overwrite_material=None,
):
"""
@@ -408,18 +434,19 @@ def load_object(
:param transform_orn: rotation quaternion, convention xyzw
:param transform_pos: translation for loading, it is a list of length 3
:param input_kd: if loading material fails, use this default material. input_kd should be a list of length 3
- :param texture_scale: texture scale for the object, downsample to save memory
+ :param texture_scale: texture scale for the object, downsample to save memory.
+ :param load_texture: whether to load textures
:param overwrite_material: whether to overwrite the default Material (usually with a RandomizedMaterial for material randomization)
:return: VAO_ids
"""
if self.optimization_process_executed and self.optimized:
logging.error(
- "Using optimized renderer and optimization process is already excuted, cannot add new objects"
+ "Using optimized renderer and optimization process is already excuted, cannot add new " "objects"
)
return
reader = tinyobjloader.ObjReader()
- logging.info("Loading {}".format(obj_path))
+ # logging.info("Loading {}".format(obj_path))
if obj_path.endswith("encrypted.obj"):
if not os.path.exists(igibson.key_path):
raise FileNotFoundError(
@@ -460,32 +487,24 @@ def load_object(
# set the default values of variable before being modified later.
num_existing_mats = len(self.material_idx_to_material_instance_mapping) # Number of current Material elements
+ num_added_materials = len(materials)
- # No MTL is supplied, or MTL is empty
- if len(materials) == 0:
- # Case when mesh obj is without mtl file but overwrite material is specified.
- if overwrite_material is not None:
- self.material_idx_to_material_instance_mapping[num_existing_mats] = overwrite_material
- num_added_materials = 1
- else:
- num_added_materials = 0
- else:
+ if num_added_materials > 0:
# Deparse the materials in the obj file by loading textures into the renderer's memory and creating a
# Material element for them
- num_added_materials = len(materials)
for i, item in enumerate(materials):
if overwrite_material is not None:
+ if isinstance(overwrite_material, RandomizedMaterial):
+ self.load_randomized_material(overwrite_material)
+ elif isinstance(overwrite_material, ProceduralMaterial):
+ self.load_procedural_material(overwrite_material)
material = overwrite_material
- elif item.diffuse_texname != "" and self.rendering_settings.load_textures:
+ elif item.diffuse_texname != "" and load_texture:
obj_dir = os.path.dirname(obj_path)
- texture = self.load_texture_file(os.path.join(obj_dir, item.diffuse_texname), texture_scale)
- texture_metallic = self.load_texture_file(
- os.path.join(obj_dir, item.metallic_texname), texture_scale
- )
- texture_roughness = self.load_texture_file(
- os.path.join(obj_dir, item.roughness_texname), texture_scale
- )
- texture_normal = self.load_texture_file(os.path.join(obj_dir, item.bump_texname), texture_scale)
+ texture = self.load_texture_file(os.path.join(obj_dir, item.diffuse_texname))
+ texture_metallic = self.load_texture_file(os.path.join(obj_dir, item.metallic_texname))
+ texture_roughness = self.load_texture_file(os.path.join(obj_dir, item.roughness_texname))
+ texture_normal = self.load_texture_file(os.path.join(obj_dir, item.bump_texname))
material = Material(
"texture",
texture_id=texture,
@@ -501,19 +520,22 @@ def load_object(
else:
material = Material("color", kd=item.diffuse)
self.material_idx_to_material_instance_mapping[num_existing_mats + i] = material
+ else:
+ # Case when mesh obj is without mtl file but overwrite material is specified.
+ if overwrite_material is not None:
+ self.material_idx_to_material_instance_mapping[num_existing_mats] = overwrite_material
+ num_added_materials = 1
- # material index = num_existing_mats ... num_existing_mats + num_added_materials - 1 (inclusive) are using
- # materials from mesh or from overwrite_material
+ # material index = num_existing_mats ... num_existing_mats + num_added_materials - 1 are using materials from mesh or
+ # from overwrite_material
# material index = num_existing_mats + num_added_materials is a fail-safe default material
- idx_of_failsafe_material = num_existing_mats + num_added_materials
-
if input_kd is not None: # append the default material in the end, in case material loading fails
- self.material_idx_to_material_instance_mapping[idx_of_failsafe_material] = Material(
+ self.material_idx_to_material_instance_mapping[num_existing_mats + num_added_materials] = Material(
"color", kd=input_kd, texture_id=-1
)
else:
- self.material_idx_to_material_instance_mapping[idx_of_failsafe_material] = Material(
+ self.material_idx_to_material_instance_mapping[num_existing_mats + num_added_materials] = Material(
"color", kd=[0.5, 0.5, 0.5], texture_id=-1
)
@@ -525,18 +547,13 @@ def load_object(
for shape in shapes:
logging.debug("Shape name: {}".format(shape.name))
- if len(shape.mesh.material_ids) == 0 or shape.mesh.material_ids[0] == -1:
- # material not found, or invalid material, as defined here
- # https://github.com/tinyobjloader/tinyobjloader/blob/master/tiny_obj_loader.h#L2997
+ if len(shape.mesh.material_ids) == 0:
if overwrite_material is not None:
material_id = 0
- # shape don't have material id, use material 0, which is the overwrite material
else:
- material_id = NO_MATERIAL_DEFINED_IN_SHAPE_AND_NO_OVERWRITE_SUPPLIED
- # if no material and no overwrite material is supplied
+ material_id = -1 # if no material and no overwrite material is supplied
else:
material_id = shape.mesh.material_ids[0]
- # assumption: each shape only have one material
logging.debug("material_id = {}".format(material_id))
logging.debug("num_indices = {}".format(len(shape.mesh.indices)))
@@ -578,13 +595,13 @@ def load_object(
# Translate the shape after they are scaled
shape_vertex += np.array(transform_pos)
- # Compute tangents and bitangents for tangent space normal mapping.
v0 = shape_vertex[0::3, :]
v1 = shape_vertex[1::3, :]
v2 = shape_vertex[2::3, :]
uv0 = shape_texcoord[0::3, :]
uv1 = shape_texcoord[1::3, :]
uv2 = shape_texcoord[2::3, :]
+
delta_pos1 = v1 - v0
delta_pos2 = v2 - v0
delta_uv1 = uv1 - uv0
@@ -592,10 +609,8 @@ def load_object(
r = 1.0 / (delta_uv1[:, 0] * delta_uv2[:, 1] - delta_uv1[:, 1] * delta_uv2[:, 0])
tangent = (delta_pos1 * delta_uv2[:, 1][:, None] - delta_pos2 * delta_uv1[:, 1][:, None]) * r[:, None]
bitangent = (delta_pos2 * delta_uv1[:, 0][:, None] - delta_pos1 * delta_uv2[:, 0][:, None]) * r[:, None]
- # Set the same tangent and bitangent for all three vertices of the triangle.
- tangent = tangent.repeat(3, axis=0)
bitangent = bitangent.repeat(3, axis=0)
-
+ tangent = tangent.repeat(3, axis=0)
vertices = np.concatenate([shape_vertex, shape_normal, shape_texcoord, tangent, bitangent], axis=-1)
faces = np.array(range(len(vertices))).reshape((len(vertices) // 3, 3))
vertexData = vertices.astype(np.float32)
@@ -609,9 +624,8 @@ def load_object(
self.vertex_data.append(vertexData)
self.shapes.append(shape)
# if material loading fails, use the default material
- if material_id == NO_MATERIAL_DEFINED_IN_SHAPE_AND_NO_OVERWRITE_SUPPLIED:
- # use fall back material
- self.shape_material_idx.append(idx_of_failsafe_material)
+ if material_id == -1:
+ self.shape_material_idx.append(num_added_materials + num_existing_mats)
else:
self.shape_material_idx.append(material_id + num_existing_mats)
@@ -629,32 +643,85 @@ def load_object(
self.visual_objects.append(new_obj)
return VAO_ids
+ def add_instance(
+ self,
+ object_id,
+ pybullet_uuid=None,
+ class_id=0,
+ pose_trans=np.eye(4),
+ pose_rot=np.eye(4),
+ dynamic=False,
+ softbody=False,
+ use_pbr=True,
+ use_pbr_mapping=True,
+ shadow_caster=True,
+ parent_body=None,
+ ):
+ """
+ Create an instance for a visual object and link it to pybullet
+
+ :param object_id: id of visual object
+ :param pybullet_uuid: body id in pybullet
+ :param class_id: class_id to render semantics
+ :param pose_trans: initial translations for the visual object
+ :param pose_rot: initial rotation matrix for the visual object
+ :param dynamic: whether the instance is dynamic
+ :param softbody: whether the instance is soft body
+ :param use_pbr: whether to use PBR
+ :param use_pbr_mapping: whether to use PBR mapping
+ :param shadow_caster: whether to cast shadow
+ :param parent_body: parent body name of current xml element (MuJoCo XML)
+ """
+ if self.optimization_process_executed and self.optimized:
+ logging.error(
+ "Using optimized renderer and optimization process is already excuted, cannot add new " "objects"
+ )
+ return
+
+ use_pbr = use_pbr and self.rendering_settings.enable_pbr
+ use_pbr_mapping = use_pbr_mapping and self.rendering_settings.enable_pbr
+
+ instance = Instance(
+ self.visual_objects[object_id],
+ id=len(self.instances),
+ pybullet_uuid=pybullet_uuid,
+ class_id=class_id,
+ pose_trans=pose_trans,
+ pose_rot=pose_rot,
+ dynamic=dynamic,
+ softbody=softbody,
+ use_pbr=use_pbr,
+ use_pbr_mapping=use_pbr_mapping,
+ shadow_caster=shadow_caster,
+ parent_body=parent_body,
+ )
+ self.instances.append(instance)
+
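add_instance pairs a visual object that was loaded earlier with a pybullet body so it can be drawn every frame. A hedged usage sketch (the asset path and body id below are illustrative placeholders, not values from this file):

    renderer = MeshRenderer(width=512, height=512)
    renderer.load_object("/path/to/model.obj")        # appends a VisualObject to renderer.visual_objects
    body_id = 0                                       # placeholder pybullet body id
    renderer.add_instance(
        object_id=len(renderer.visual_objects) - 1,   # the object loaded above
        pybullet_uuid=body_id,
        class_id=1,
        dynamic=True,
    )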
def add_instance_group(
self,
object_ids,
- link_ids=[-1],
- poses_trans=[np.eye(4)],
- poses_rot=[np.eye(4)],
+ link_ids,
+ poses_trans,
+ poses_rot,
pybullet_uuid=None,
- ig_object=None,
class_id=0,
dynamic=False,
- softbody=False,
+ robot=None,
use_pbr=True,
use_pbr_mapping=True,
shadow_caster=True,
):
"""
- Create an instance group for a list of visual objects and link it to pybullet.
+ Create an instance group for a list of visual objects and link it to pybullet
:param object_ids: object ids of the visual objects
:param link_ids: link_ids in pybullet
:param poses_trans: initial translations for each visual object
:param poses_rot: initial rotation matrix for each visual object
:param pybullet_uuid: body id in pybullet
- :param ig_object: iGibson object associated with this instance group
:param class_id: class_id to render semantics
:param dynamic: whether the instance group is dynamic
+ :param robot: The robot associated with this InstanceGroup
:param use_pbr: whether to use PBR
:param use_pbr_mapping: whether to use PBR mapping
:param shadow_caster: whether to cast shadow
@@ -674,18 +741,53 @@ def add_instance_group(
id=len(self.instances),
link_ids=link_ids,
pybullet_uuid=pybullet_uuid,
- ig_object=ig_object,
class_id=class_id,
poses_trans=poses_trans,
poses_rot=poses_rot,
dynamic=dynamic,
- softbody=softbody,
+ robot=robot,
use_pbr=use_pbr,
use_pbr_mapping=use_pbr_mapping,
shadow_caster=shadow_caster,
)
self.instances.append(instance_group)
- self.update_instance_id_to_pb_id_map()
+
+ def add_robot(
+ self, object_ids, link_ids, poses_trans, poses_rot, pybullet_uuid=None, class_id=0, dynamic=False, robot=None
+ ):
+ """
+ Create an instance group (a robot) for a list of visual objects and link it to pybullet
+
+ :param object_ids: object ids of the visual objects
+ :param link_ids: link_ids in pybullet
+ :param poses_trans: initial translations for each visual object
+ :param poses_rot: initial rotation matrix for each visual object
+ :param pybullet_uuid: body id in pybullet
+ :param class_id: class_id to render semantics
+ :param dynamic: whether the instance group is dynamic
+ :param robot: The robot associated with this InstanceGroup
+ """
+
+ if self.optimization_process_executed and self.optimized:
+ logging.error(
+ "Using optimized renderer and optimization process is already excuted, cannot add new " "objects"
+ )
+ return
+
+ robot = Robot(
+ [self.visual_objects[object_id] for object_id in object_ids],
+ id=len(self.instances),
+ link_ids=link_ids,
+ pybullet_uuid=pybullet_uuid,
+ class_id=class_id,
+ poses_trans=poses_trans,
+ poses_rot=poses_rot,
+ dynamic=dynamic,
+ robot=robot,
+ use_pbr=False,
+ use_pbr_mapping=False,
+ )
+ self.instances.append(robot)
def add_text(
self,
@@ -703,7 +805,6 @@ def add_text(
"""
Creates a Text object with the given parameters. Returns the text object to the caller,
so various settings can be changed - eg. text content, position, scale, etc.
-
:param text_data: starting text to display (can be changed at a later time by set_text)
:param font_name: name of font to render - same as font folder in iGibson assets
:param font_style: style of font - one of [regular, italic, bold]
@@ -734,7 +835,7 @@ def add_text(
def set_camera(self, camera, target, up, cache=False):
"""
- Set camera pose.
+ Set camera pose
:param camera: camera position
:param target: camera target
@@ -756,7 +857,7 @@ def set_camera(self, camera, target, up, cache=False):
def set_z_near_z_far(self, znear, zfar):
"""
- Set z limit for camera.
+ Set z limit for camera
:param znear: lower limit for z
:param zfar: upper limit for z
@@ -780,7 +881,7 @@ def set_fov(self, fov):
def set_light_color(self, color):
"""
- Set light color.
+ Set light color
:param color: light color
"""
@@ -788,7 +889,7 @@ def set_light_color(self, color):
def get_intrinsics(self):
"""
- Get camera intrinsics.
+ Get camera intrinsics
:return: camera instrincs
"""
@@ -812,7 +913,7 @@ def get_intrinsics(self):
def set_projection_matrix(self, fu, fv, u0, v0, znear, zfar):
"""
- Set projection matrix, given camera intrincs parameters.
+ Set projection matrix, given camera intrinsic parameters
"""
w = self.width
h = self.height
@@ -872,7 +973,7 @@ def render(
"""
A function to render all the instances in the renderer and read the output from framebuffer.
- :param modes: a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow')
+ :param modes: a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow').
:param hidden: hidden instances to skip. When rendering from a robot's perspective, it's own body can be hidden
:param return_buffer: whether to return the frame buffers as numpy arrays
:param render_shadow_pass: whether to render shadow
@@ -1017,19 +1118,19 @@ def render_companion_window(self):
def get_visual_objects(self):
"""
- Return visual objects.
+ Return visual objects
"""
return self.visual_objects
def get_instances(self):
"""
- Return instances.
+ Return instances
"""
return self.instances
def dump(self):
"""
- Dump instance vertex and face information.
+ Dump instance vertex and face information
"""
instances_vertices = []
instances_faces = []
@@ -1047,7 +1148,7 @@ def dump(self):
def set_light_pos(self, light):
"""
- Set light position.
+ Set light position
:param light: light position
"""
@@ -1055,13 +1156,13 @@ def set_light_pos(self, light):
def get_num_objects(self):
"""
- Return the number of objects.
+ Return the number of objects
"""
return len(self.objects)
def set_pose(self, pose, idx):
"""
- Set pose for a specific instance.
+ Set pose for a specific instance
:param pose: instance pose
:param idx: instance id
@@ -1081,7 +1182,7 @@ def release(self):
def clean(self):
"""
- Clean all the framebuffers, objects and instances.
+ Clean all the framebuffers, objects and instances
"""
clean_list = [
self.color_tex_rgb,
@@ -1139,7 +1240,6 @@ def clean(self):
self.faces = [] # GC should free things here
self.visual_objects = []
self.instances = []
- self.update_instance_id_to_pb_id_map()
self.vertex_data = []
self.shapes = []
save_path = os.path.join(igibson.ig_dataset_path, "tmp")
@@ -1169,7 +1269,7 @@ def transform_point(self, vec):
def transform_pose(self, pose):
"""
- Transform pose from world frame to camera frame.
+ Transform pose from world frame to camera frame
:param pose: pose in world frame
:return: pose in camera frame
@@ -1182,15 +1282,15 @@ def transform_pose(self, pose):
def render_active_cameras(self, modes=("rgb")):
"""
Render camera images for the active cameras. This is applicable for robosuite integration with iGibson,
- where there are multiple cameras defined but only some are active (e.g., to switch between views with TAB).
+ where there are multiple cameras defined but only some are active (e.g., to switch between views with TAB)
:return: a list of frames (number of modalities x number of robots)
"""
frames = []
hide_robot = self.rendering_settings.hide_robot
for instance in self.instances:
- if isinstance(instance.ig_object, BaseRobot):
- for camera in instance.ig_object.cameras:
+ if isinstance(instance, Robot):
+ for camera in instance.robot.cameras:
if camera.is_active():
camera_pose = camera.get_pose()
camera_pos = camera_pose[:3]
@@ -1206,19 +1306,18 @@ def render_active_cameras(self, modes=("rgb")):
def render_robot_cameras(self, modes=("rgb")):
"""
- Render robot camera images.
+ Render robot camera images
:return: a list of frames (number of modalities x number of robots)
"""
frames = []
for instance in self.instances:
- if isinstance(instance.ig_object, BaseRobot):
- camera_pos = instance.ig_object.eyes.get_position()
- orn = instance.ig_object.eyes.get_orientation()
+ if isinstance(instance, Robot):
+ camera_pos = instance.robot.eyes.get_position()
+ orn = instance.robot.eyes.get_orientation()
mat = quat2rotmat(xyzw2wxyz(orn))[:3, :3]
view_direction = mat.dot(np.array([1, 0, 0]))
- up_direction = mat.dot(np.array([0, 0, 1]))
- self.set_camera(camera_pos, camera_pos + view_direction, up_direction, cache=True)
+ self.set_camera(camera_pos, camera_pos + view_direction, [0, 0, 1], cache=True)
hidden_instances = []
if self.rendering_settings.hide_robot:
hidden_instances.append(instance)
@@ -1243,8 +1342,8 @@ def _get_names_active_cameras(self):
"""
names = []
for instance in self.instances:
- if isinstance(instance.ig_object, BaseRobot):
- for camera in instance.ig_object.cameras:
+ if isinstance(instance, Robot):
+ for camera in instance.robot.cameras:
if camera.is_active():
names.append(camera.camera_name)
return names
@@ -1255,8 +1354,8 @@ def _switch_camera(self, idx):
Applicable for integration with iGibson.
"""
for instance in self.instances:
- if isinstance(instance.ig_object, BaseRobot):
- instance.ig_object.cameras[idx].switch()
+ if isinstance(instance, Robot):
+ instance.robot.cameras[idx].switch()
def _is_camera_active(self, idx):
"""
@@ -1264,8 +1363,8 @@ def _is_camera_active(self, idx):
Applicable for integration with iGibson.
"""
for instance in self.instances:
- if isinstance(instance.ig_object, BaseRobot):
- return instance.ig_object.cameras[idx].is_active()
+ if isinstance(instance, Robot):
+ return instance.robot.cameras[idx].is_active()
def _get_camera_name(self, idx):
"""
@@ -1273,15 +1372,15 @@ def _get_camera_name(self, idx):
Applicable for integration with iGibson.
"""
for instance in self.instances:
- if isinstance(instance.ig_object, BaseRobot):
- return instance.ig_object.cameras[idx].camera_name
+ if isinstance(instance, Robot):
+ return instance.robot.cameras[idx].camera_name
def optimize_vertex_and_texture(self):
"""
- Optimize vertex and texture for optimized renderer.
+ Optimize vertex and texture for optimized renderer
"""
- for tex_file in self.texture_files:
- print("Texture: ", tex_file)
+ # for tex_file in self.texture_files:
+ # print("Texture: ", tex_file)
# Set cutoff about 4096, otherwise we end up filling VRAM very quickly
cutoff = 5000 * 5000
shouldShrinkSmallTextures = True
@@ -1319,22 +1418,34 @@ def optimize_vertex_and_texture(self):
hidden_array = []
for instance in self.instances:
- id_sum = 0
- # Collect OR buffer indices over all visual objects in this group
- temp_or_buffer_indices = []
- for vo in instance.objects:
- ids = vo.VAO_ids
+ if isinstance(instance, Instance):
+ ids = instance.object.VAO_ids
or_buffer_idx_start = len(duplicate_vao_ids)
duplicate_vao_ids.extend(ids)
or_buffer_idx_end = len(duplicate_vao_ids)
- # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this InstanceGroup will use
- temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end)))
- id_sum += len(ids)
- instance.or_buffer_indices = list(temp_or_buffer_indices)
- class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * id_sum)
- instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * id_sum)
- pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)
- hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)
+ # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this Instance will use
+ instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end))
+ class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * len(ids))
+ instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * len(ids))
+ pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids))
+ hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids))
+ elif isinstance(instance, InstanceGroup) or isinstance(instance, Robot):
+ id_sum = 0
+ # Collect OR buffer indices over all visual objects in this group
+ temp_or_buffer_indices = []
+ for vo in instance.objects:
+ ids = vo.VAO_ids
+ or_buffer_idx_start = len(duplicate_vao_ids)
+ duplicate_vao_ids.extend(ids)
+ or_buffer_idx_end = len(duplicate_vao_ids)
+ # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this InstanceGroup will use
+ temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end)))
+ id_sum += len(ids)
+ instance.or_buffer_indices = list(temp_or_buffer_indices)
+ class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * id_sum)
+ instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * id_sum)
+ pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)
+ hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)
# Number of shapes in the OR buffer is equal to the number of duplicate vao_ids
self.or_buffer_shape_num = len(duplicate_vao_ids)
@@ -1495,7 +1606,7 @@ def optimize_vertex_and_texture(self):
def update_optimized_texture_internal(self):
"""
- Update the texture_id for optimized renderer.
+ Update the texture_id for optimized renderer
"""
# Some of these may share visual data, but have unique transforms
duplicate_vao_ids = []
@@ -1509,24 +1620,39 @@ def update_optimized_texture_internal(self):
hidden_array = []
for instance in self.instances:
- id_sum = 0
- # Collect OR buffer indices over all visual objects in this group
- temp_or_buffer_indices = []
- for vo in instance.objects:
- ids = vo.VAO_ids
+ if isinstance(instance, Instance):
+ ids = instance.object.VAO_ids
or_buffer_idx_start = len(duplicate_vao_ids)
duplicate_vao_ids.extend(ids)
or_buffer_idx_end = len(duplicate_vao_ids)
- # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this InstanceGroup will use
- temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end)))
- id_sum += len(ids)
- instance.or_buffer_indices = list(temp_or_buffer_indices)
- class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * id_sum)
- instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * id_sum)
- pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)
- hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)
+ # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this Instance will use
+ instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end))
+ class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * len(ids))
+ instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * len(ids))
+ pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids))
+ hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids))
+ elif isinstance(instance, InstanceGroup) or isinstance(instance, Robot):
+ id_sum = 0
+ # Collect OR buffer indices over all visual objects in this group
+ temp_or_buffer_indices = []
+ for vo in instance.objects:
+ ids = vo.VAO_ids
+ or_buffer_idx_start = len(duplicate_vao_ids)
+ duplicate_vao_ids.extend(ids)
+ or_buffer_idx_end = len(duplicate_vao_ids)
+ # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this InstanceGroup will use
+ temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end)))
+ id_sum += len(ids)
+ instance.or_buffer_indices = list(temp_or_buffer_indices)
+ class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * id_sum)
+ instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * id_sum)
+ pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)
+ hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)
# Variables needed for multi draw elements call
+ index_ptr_offsets = []
+ index_counts = []
+ indices = []
diffuse_color_array = []
tex_num_array = []
tex_layer_array = []
@@ -1637,7 +1763,7 @@ def update_optimized_texture_internal(self):
def update_hidden_highlight_state(self, instances):
"""
- Update the hidden state of a list of instances.
+ Updates the hidden state of a list of instances
This function is called by instances and not every frame, since hiding is a very infrequent operation.
"""
@@ -1665,12 +1791,21 @@ def update_dynamic_positions(self, need_flow_info=False):
:param need_flow_info: whether flow information is required
"""
for instance in self.instances:
- buf_idxs = instance.or_buffer_indices
- # Continue if instance has no visual objects
- if not buf_idxs:
- continue
- self.trans_data[buf_idxs] = np.array(instance.poses_trans)
- self.rot_data[buf_idxs] = np.array(instance.poses_rot)
+ # if instance.dynamic:
+ if isinstance(instance, Instance):
+ buf_idxs = instance.or_buffer_indices
+ # Continue if instance has no visual objects
+ if not buf_idxs:
+ continue
+ self.trans_data[buf_idxs] = np.array(instance.pose_trans)
+ self.rot_data[buf_idxs] = np.array(instance.pose_rot)
+ elif isinstance(instance, InstanceGroup) or isinstance(instance, Robot):
+ buf_idxs = instance.or_buffer_indices
+ # Continue if instance has no visual objects
+ if not buf_idxs:
+ continue
+ self.trans_data[buf_idxs] = np.array(instance.poses_trans)
+ self.rot_data[buf_idxs] = np.array(instance.poses_rot)
if need_flow_info:
# this part could be expensive
@@ -1692,7 +1827,7 @@ def update_dynamic_positions(self, need_flow_info=False):
def use_pbr(self, use_pbr, use_pbr_mapping):
"""
- Apply PBR setting to every instance.
+ Apply PBR setting to every instance
:param use_pbr: whether to use pbr
:param use_pbr_mapping: whether to use pbr mapping
@@ -1703,7 +1838,7 @@ def use_pbr(self, use_pbr, use_pbr_mapping):
def setup_lidar_param(self):
"""
- Set up LiDAR params.
+ Set up LiDAR params
"""
lidar_vertical_low = -15 / 180.0 * np.pi
lidar_vertical_high = 15 / 180.0 * np.pi
@@ -1735,8 +1870,7 @@ def setup_lidar_param(self):
def get_lidar_from_depth(self):
"""
- Get partial LiDAR readings from depth sensors with limited FOV.
-
+ Get partial LiDAR readings from depth sensors with limited FOV
:return: partial LiDAR readings with limited FOV
"""
lidar_readings = self.render(modes=("3d"))[0]
@@ -1748,20 +1882,18 @@ def get_lidar_from_depth(self):
def get_lidar_all(self, offset_with_camera=np.array([0, 0, 0])):
"""
- Get complete LiDAR readings by patching together partial ones.
-
+ Get complete LiDAR readings by patching together partial ones
:param offset_with_camera: optionally place the lidar scanner
- with an offset to the camera
+ with an offset to the camera
:return: complete 360 degree LiDAR readings
"""
for instance in self.instances:
- if isinstance(instance.ig_object, BaseRobot):
- camera_pos = instance.ig_object.eyes.get_position() + offset_with_camera
- orn = instance.ig_object.eyes.get_orientation()
+ if isinstance(instance, Robot):
+ camera_pos = instance.robot.eyes.get_position() + offset_with_camera
+ orn = instance.robot.eyes.get_orientation()
mat = quat2rotmat(xyzw2wxyz(orn))[:3, :3]
view_direction = mat.dot(np.array([1, 0, 0]))
- up_direction = mat.dot(np.array([0, 0, 1]))
- self.set_camera(camera_pos, camera_pos + view_direction, up_direction)
+ self.set_camera(camera_pos, camera_pos + view_direction, [0, 0, 1])
original_fov = self.vertical_fov
self.set_fov(90)
@@ -1799,35 +1931,25 @@ def get_cube(self, mode="rgb", use_robot_camera=False):
"""
:param mode: simulator rendering mode, 'rgb' or '3d'
:param use_robot_camera: use the camera pose from robot
+
:return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D] * n_cameras
"""
- # Cache the original fov and V to be restored later
- original_fov = self.vertical_fov
- original_V = np.copy(self.V)
-
- # Set fov to be 90 degrees
+ orig_fov = self.vertical_fov
self.set_fov(90)
+ org_V = np.copy(self.V)
- # Compute initial_V that will be used to render in 6 directions, based on whether use_robot_camera is True
if use_robot_camera:
for instance in self.instances:
- if isinstance(instance.ig_object, BaseRobot):
- camera_pos = instance.ig_object.eyes.get_position()
- orn = instance.ig_object.eyes.get_orientation()
+ if isinstance(instance, Robot):
+ camera_pos = instance.robot.eyes.get_position()
+ orn = instance.robot.eyes.get_orientation()
mat = quat2rotmat(xyzw2wxyz(orn))[:3, :3]
view_direction = mat.dot(np.array([1, 0, 0]))
- up_direction = mat.dot(np.array([0, 0, 1]))
- self.set_camera(camera_pos, camera_pos + view_direction, up_direction)
- initial_V = np.copy(self.V)
- else:
- initial_V = original_V
+ self.set_camera(camera_pos, camera_pos + view_direction, [0, 0, 1])
def render_cube():
- # Store 6 frames in 6 directions
frames = []
-
- # Forward, backward, left, right
r = np.array(
[
[
@@ -1849,45 +1971,30 @@ def render_cube():
# Up
r_up = np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
- self.V = r_up.dot(initial_V)
+ self.V = r_up.dot(org_V)
frames.append(self.render(modes=(mode))[0])
- # Down
r_down = np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
- self.V = r_down.dot(initial_V)
+ # Down
+ self.V = r_down.dot(org_V)
frames.append(self.render(modes=(mode))[0])
return frames
frames = render_cube()
-
- # Restore original fov and V
- self.V = original_V
- self.set_fov(original_fov)
-
+ self.V = org_V
+ self.set_fov(orig_fov)
return frames
def get_equi(self, mode="rgb", use_robot_camera=False):
"""
- Generate panorama images
:param mode: simulator rendering mode, 'rgb' or '3d'
:param use_robot_camera: use the camera pose from robot
:return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D]
"""
frames = self.get_cube(mode=mode, use_robot_camera=use_robot_camera)
frames = [frames[0], frames[1][:, ::-1, :], frames[2][:, ::-1, :], frames[3], frames[4], frames[5]]
- try:
- equi = py360convert.c2e(cubemap=frames, h=frames[0].shape[0], w=frames[0].shape[0] * 2, cube_format="list")
- except AssertionError:
- raise ValueError("Something went wrong during getting cubemap. Is the image size not a square?")
+ equi = py360convert.c2e(cubemap=frames, h=frames[0].shape[0], w=frames[0].shape[0] * 2, cube_format="list")
return equi
-
- def update_instance_id_to_pb_id_map(self):
- self.instance_id_to_pb_id = np.full((MAX_INSTANCE_COUNT,), -1)
- for inst in self.instances:
- self.instance_id_to_pb_id[inst.id] = inst.pybullet_uuid if inst.pybullet_uuid is not None else -1
-
- def get_pb_ids_for_instance_ids(self, instance_ids):
- return self.instance_id_to_pb_id[instance_ids.astype(int)]
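get_cube() renders six 90-degree views and get_equi() stitches them into a panorama with py360convert. A minimal sketch of the stitching step alone, using placeholder frames of an assumed size:

    import numpy as np
    import py360convert

    size = 256
    # Placeholder cube faces ordered [F, R, B, L, U, D], as produced by get_cube()
    frames = [np.zeros((size, size, 3), dtype=np.float32) for _ in range(6)]
    equi = py360convert.c2e(cubemap=frames, h=size, w=size * 2, cube_format="list")
    print(equi.shape)  # (256, 512, 3): a 2:1 equirectangular panorama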
diff --git a/igibson/render/mesh_renderer/mesh_renderer_settings.py b/igibson/render/mesh_renderer/mesh_renderer_settings.py
index 3c4bf021c..99c20bf25 100644
--- a/igibson/render/mesh_renderer/mesh_renderer_settings.py
+++ b/igibson/render/mesh_renderer/mesh_renderer_settings.py
@@ -1,4 +1,3 @@
-import logging
import os
import platform
@@ -26,8 +25,6 @@ def __init__(
show_glfw_window=False,
blend_highlight=False,
is_robosuite=False,
- glsl_version_override=460,
- load_textures=True,
):
"""
:param use_fisheye: whether to use fisheye camera
@@ -48,26 +45,14 @@ def __init__(
:param show_glfw_window: whether to show glfw window (default false)
:param blend_highlight: blend highlight of objects into RGB image
:param is_robosuite: whether the environment is of robosuite.
- :param glsl_version_override: for backwards compatibility only. Options are 450 or 460.
- :param load_textures: Whether textures should be loaded. Set to False if not using RGB modality to save memory.
"""
self.use_fisheye = use_fisheye
self.msaa = msaa
+ self.enable_shadow = enable_shadow
self.env_texture_filename = env_texture_filename
self.env_texture_filename2 = env_texture_filename2
self.env_texture_filename3 = env_texture_filename3
-
- if platform.system() == "Darwin":
- if optimized:
- logging.warning("Darwin does not support optimized renderer, automatically disabling")
- if enable_shadow:
- logging.warning("Darwin does not support shadow, automatically disabling")
- self.optimized = False
- self.enable_shadow = False
- else:
- self.optimized = optimized
- self.enable_shadow = enable_shadow
-
+ self.optimized = optimized
self.skybox_size = skybox_size
self.light_modulation_map_filename = light_modulation_map_filename
self.light_dimming_factor = light_dimming_factor
@@ -78,8 +63,6 @@ def __init__(
self.show_glfw_window = show_glfw_window
self.blend_highlight = blend_highlight
self.is_robosuite = is_robosuite
- self.load_textures = load_textures
- self.glsl_version_override = glsl_version_override
if glfw_gl_version is not None:
self.glfw_gl_version = glfw_gl_version
@@ -87,7 +70,7 @@ def __init__(
if platform.system() == "Darwin":
self.glfw_gl_version = [4, 1]
else:
- self.glfw_gl_version = [4, 6]
+ self.glfw_gl_version = [4, 5]
def get_fastest(self):
self.msaa = False
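For reference, a hedged sketch of how these settings are typically consumed by the renderer (all option values are illustrative):

    from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer, MeshRendererSettings

    settings = MeshRendererSettings(
        msaa=True,           # multisample anti-aliasing
        enable_shadow=True,  # shadow mapping pass
        optimized=True,      # merged-buffer optimized renderer
        skybox_size=20.0,
    )
    renderer = MeshRenderer(width=512, height=512, rendering_settings=settings)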
diff --git a/igibson/render/mesh_renderer/mesh_renderer_tensor.py b/igibson/render/mesh_renderer/mesh_renderer_tensor.py
index cf1621c4b..ea36c880d 100644
--- a/igibson/render/mesh_renderer/mesh_renderer_tensor.py
+++ b/igibson/render/mesh_renderer/mesh_renderer_tensor.py
@@ -1,5 +1,4 @@
import logging
-import platform
from igibson.render.mesh_renderer.get_available_devices import get_cuda_device
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer, MeshRendererSettings
@@ -23,8 +22,6 @@ def __init__(
rendering_settings=MeshRendererSettings(),
simulator=None,
):
- if platform.system() != "Linux":
- raise Exception("Rendering to pytorch tensor is only available on Linux.")
super(MeshRendererG2G, self).__init__(
width, height, vertical_fov, device_idx, rendering_settings, simulator
)
@@ -33,8 +30,7 @@ def __init__(
with torch.cuda.device(self.cuda_idx):
self.image_tensor = torch.cuda.ByteTensor(height, width, 4).cuda()
self.normal_tensor = torch.cuda.ByteTensor(height, width, 4).cuda()
- self.seg_tensor = torch.cuda.FloatTensor(height, width, 4).cuda()
- self.ins_seg_tensor = torch.cuda.FloatTensor(height, width, 4).cuda()
+ self.seg_tensor = torch.cuda.ByteTensor(height, width, 4).cuda()
self.pc_tensor = torch.cuda.FloatTensor(height, width, 4).cuda()
self.optical_flow_tensor = torch.cuda.FloatTensor(height, width, 4).cuda()
self.scene_flow_tensor = torch.cuda.FloatTensor(height, width, 4).cuda()
@@ -65,14 +61,6 @@ def readbuffer_to_tensor(self, modes=AVAILABLE_MODALITIES):
int(self.color_tex_semantics), int(self.width), int(self.height), self.seg_tensor.data_ptr()
)
results.append(self.seg_tensor.clone())
- elif mode == "ins_seg":
- self.r.map_tensor(
- int(self.color_tex_ins_seg),
- int(self.width),
- int(self.height),
- self.ins_seg_tensor.data_ptr(),
- )
- results.append(self.ins_seg_tensor.clone())
elif mode == "3d":
self.r.map_tensor_float(
int(self.color_tex_3d), int(self.width), int(self.height), self.pc_tensor.data_ptr()
@@ -97,7 +85,7 @@ def readbuffer_to_tensor(self, modes=AVAILABLE_MODALITIES):
return results
- def render(self, modes=AVAILABLE_MODALITIES, hidden=(), render_shadow_pass=True):
+ def render(self, modes=AVAILABLE_MODALITIES, hidden=(), return_buffer=True, render_shadow_pass=True):
"""
A function to render all the instances in the renderer and read the output from framebuffer into pytorch tensor.
@@ -110,6 +98,7 @@ def render(self, modes=AVAILABLE_MODALITIES, hidden=(), render_shadow_pass=True)
)
return self.readbuffer_to_tensor(modes)
+
except ImportError:
- logging.warning("torch is not available, falling back to rendering to memory (instead of tensor)")
+ print("torch is not available, falling back to rendering to memory(instead of tensor)")
MeshRendererG2G = MeshRenderer
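A hedged usage sketch of the tensor path above: when torch imports successfully, render() fills and returns CUDA tensors, one per requested mode (the modes and sizes here are illustrative):

    from igibson.render.mesh_renderer.mesh_renderer_tensor import MeshRendererG2G

    renderer = MeshRendererG2G(width=512, height=512, device_idx=0)
    rgb, pc = renderer.render(modes=("rgb", "3d"))  # CUDA tensors, in the order of the requested modes
    assert rgb.is_cuda and pc.is_cuda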
diff --git a/igibson/render/mesh_renderer/mesh_renderer_vr.py b/igibson/render/mesh_renderer/mesh_renderer_vr.py
index d77e1e58f..ee7e9086e 100644
--- a/igibson/render/mesh_renderer/mesh_renderer_vr.py
+++ b/igibson/render/mesh_renderer/mesh_renderer_vr.py
@@ -2,7 +2,6 @@
import time
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer, MeshRendererSettings
-from igibson.utils.constants import AVAILABLE_MODALITIES
from igibson.utils.utils import dump_config, parse_config, parse_str_config
@@ -262,10 +261,7 @@ def __init__(self, rendering_settings=MeshRendererSettings(), vr_settings=VrSett
self.width = 1296
self.height = 1440
super().__init__(
- width=self.width,
- height=self.height,
- rendering_settings=self.vr_rendering_settings,
- simulator=simulator,
+ width=self.width, height=self.height, rendering_settings=self.vr_rendering_settings, simulator=simulator
)
# Rename self.r to self.vrsys
@@ -278,6 +274,7 @@ def __init__(self, rendering_settings=MeshRendererSettings(), vr_settings=VrSett
# Always turn MSAA off for VR
self.msaa = False
+ # The VrTextOverlay that serves as the VR HUD (heads-up display)
self.vr_hud = None
def gen_vr_hud(self):
@@ -304,9 +301,7 @@ def update_vr_data(self):
"""
self.vrsys.updateVRData()
- def render(
- self, modes=AVAILABLE_MODALITIES, hidden=(), return_buffer=True, render_shadow_pass=True, render_text_pass=True
- ):
+ def render(self, return_frame=False):
"""
Renders VR scenes.
"""
@@ -323,7 +318,7 @@ def render(
[right_cam_pos[0], right_cam_pos[1], 10], [right_cam_pos[0], right_cam_pos[1], 0]
)
- super().render(modes=("rgb"), return_buffer=False)
+ super().render(modes=("rgb"), return_buffer=False, render_shadow_pass=True)
self.vrsys.postRenderVRForEye("left", self.color_tex_rgb)
# Render and submit right eye
self.V = right_view
@@ -336,10 +331,13 @@ def render(
self.vrsys.postRenderVRForEye("right", self.color_tex_rgb)
# Update HUD so it renders in the HMD
- if self.vr_hud is not None:
+ if self.vr_hud:
self.vr_hud.refresh_text()
else:
- return super().render(modes=("rgb"), return_buffer=return_buffer)
+ if return_frame:
+ return super().render(modes=("rgb"), return_buffer=return_frame, render_shadow_pass=True)
+ else:
+ super().render(modes=("rgb"), return_buffer=False, render_shadow_pass=True)
def vr_compositor_update(self):
"""
diff --git a/igibson/render/mesh_renderer/shaders/450/optimized_vert.shader b/igibson/render/mesh_renderer/shaders/450/optimized_vert.shader
index 043f57d17..4c3636b6e 100644
--- a/igibson/render/mesh_renderer/shaders/450/optimized_vert.shader
+++ b/igibson/render/mesh_renderer/shaders/450/optimized_vert.shader
@@ -1,4 +1,4 @@
-#version 450
+#version 460
#define MAX_ARRAY_SIZE 1024
@@ -26,8 +26,10 @@ layout (std140) uniform UVData {
vec4 uv_transform_param[MAX_ARRAY_SIZE];
};
-
-in int gl_DrawID;
+// Commenting out this line (and changing the version to 460)
+// allows OpenGL to compile this shader regardless of the
+// Nvidia driver version.
+// in int gl_DrawID;
uniform mat4 V;
uniform mat4 last_V;
diff --git a/igibson/render/mesh_renderer/shaders/460/equirect2cube_cs.glsl b/igibson/render/mesh_renderer/shaders/460/equirect2cube_cs.glsl
deleted file mode 100644
index 3de1d2398..000000000
--- a/igibson/render/mesh_renderer/shaders/460/equirect2cube_cs.glsl
+++ /dev/null
@@ -1,55 +0,0 @@
-#version 460 core
-// Physically Based Rendering
-// Copyright (c) 2017-2018 Michał Siejak
-
-// Converts equirectangular (lat-long) projection texture into a proper cubemap.
-
-const float PI = 3.141592;
-const float TwoPI = 2 * PI;
-uniform float light_dimming_factor;
-
-#if VULKAN
-layout(set=0, binding=0) uniform sampler2D inputTexture;
-layout(set=0, binding=1, rgba16f) restrict writeonly uniform imageCube outputTexture;
-#else
-layout(binding=0) uniform sampler2D inputTexture;
-layout(binding=0, rgba16f) restrict writeonly uniform imageCube outputTexture;
-#endif // VULKAN
-
-// Calculate normalized sampling direction vector based on current fragment coordinates (gl_GlobalInvocationID.xyz).
-// This is essentially "inverse-sampling": we reconstruct what the sampling vector would be if we wanted it to "hit"
-// this particular fragment in a cubemap.
-// See: OpenGL core profile specs, section 8.13.
-vec3 getSamplingVector()
-{
- vec2 st = gl_GlobalInvocationID.xy/vec2(imageSize(outputTexture));
- vec2 uv = 2.0 * vec2(st.x, 1.0-st.y) - vec2(1.0);
-
- vec3 ret;
- // Select vector based on cubemap face index.
- // Sadly 'switch' doesn't seem to work, at least on NVIDIA.
- if(gl_GlobalInvocationID.z == 0) ret = vec3(1.0, uv.y, -uv.x);
- else if(gl_GlobalInvocationID.z == 1) ret = vec3(-1.0, uv.y, uv.x);
- else if(gl_GlobalInvocationID.z == 2) ret = vec3(uv.x, 1.0, -uv.y);
- else if(gl_GlobalInvocationID.z == 3) ret = vec3(uv.x, -1.0, uv.y);
- else if(gl_GlobalInvocationID.z == 4) ret = vec3(uv.x, uv.y, 1.0);
- else if(gl_GlobalInvocationID.z == 5) ret = vec3(-uv.x, uv.y, -1.0);
- return normalize(ret);
-}
-
-layout(local_size_x=32, local_size_y=32, local_size_z=1) in;
-void main(void)
-{
- vec3 v = getSamplingVector();
-
- // Convert Cartesian direction vector to spherical coordinates.
- float phi = atan(v.y, v.x); // here we use z-up convention, so y and z are swapped from original code
- float theta = acos(v.z); // here we use z-up convention, so y and z are swapped from original code
-
- // Sample equirectangular texture.
- vec4 color = texture(inputTexture, vec2(phi/TwoPI, theta/PI));
- color.xyz = color.xyz * light_dimming_factor;
-
- // Write out color to output cubemap.
- imageStore(outputTexture, ivec3(gl_GlobalInvocationID), color);
-}
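For orientation, the lookup performed by the deleted equirect2cube_cs.glsl shader — mapping a cubemap-texel direction to an equirectangular (lat-long) texture coordinate — can be sketched in NumPy. This is an illustrative, hypothetical helper rather than iGibson code; it mirrors the z-up convention used above (phi = atan2(y, x), theta = acos(z)).

import numpy as np

def sample_equirect(panorama, direction):
    # panorama: H x W x 3 array; direction: 3-vector (assumed inputs, not an iGibson API)
    direction = np.asarray(direction, dtype=float)
    x, y, z = direction / np.linalg.norm(direction)
    phi = np.arctan2(y, x)                      # longitude in (-pi, pi]
    theta = np.arccos(np.clip(z, -1.0, 1.0))    # colatitude in [0, pi]
    h, w, _ = panorama.shape
    u = (phi / (2.0 * np.pi)) % 1.0             # wrap to [0, 1), matches phi/TwoPI in the shader
    v = theta / np.pi                           # matches theta/PI in the shader
    col = min(int(u * w), w - 1)
    row = min(int(v * h), h - 1)
    return panorama[row, col]                   # nearest-neighbour lookup instead of GPU filtering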
diff --git a/igibson/render/mesh_renderer/shaders/460/frag.shader b/igibson/render/mesh_renderer/shaders/460/frag.shader
deleted file mode 100644
index 7cf906c49..000000000
--- a/igibson/render/mesh_renderer/shaders/460/frag.shader
+++ /dev/null
@@ -1,232 +0,0 @@
-#version 460
-uniform sampler2D texUnit;
-uniform sampler2D metallicTexture;
-uniform sampler2D roughnessTexture;
-uniform sampler2D normalTexture;
-
-uniform samplerCube specularTexture;
-uniform samplerCube irradianceTexture;
-uniform sampler2D specularBRDF_LUT;
-
-uniform samplerCube specularTexture2;
-uniform samplerCube irradianceTexture2;
-uniform sampler2D specularBRDF_LUT2;
-
-uniform sampler2D lightModulationMap;
-
-uniform vec3 eyePosition;
-
-uniform float use_texture;
-uniform float use_pbr;
-uniform float use_pbr_mapping;
-uniform float use_two_light_probe;
-uniform float metallic;
-uniform float roughness;
-
-uniform sampler2D depthMap;
-uniform int shadow_pass;
-
-in vec2 theCoords;
-in vec3 Normal_world;
-in vec3 Normal_cam;
-in vec3 FragPos;
-in vec3 Semantic_seg_color;
-in vec3 Instance_seg_color;
-in vec3 Pos_cam;
-in vec3 Pos_cam_prev;
-in vec3 Pos_cam_projected;
-in vec3 Diffuse_color;
-in mat3 TBN;
-in vec4 FragPosLightSpace;
-in vec2 Optical_flow;
-
-const float PI = 3.141592;
-const float Epsilon = 0.00001;
-
-layout (location = 0) out vec4 outputColour;
-layout (location = 1) out vec4 NormalColour;
-layout (location = 2) out vec4 SemanticSegColour;
-layout (location = 3) out vec4 InstanceSegColour;
-layout (location = 4) out vec4 PCColour;
-layout (location = 5) out vec4 SceneFlowColour;
-layout (location = 6) out vec4 OpticalFlowColour;
-
-uniform vec3 light_position; // in world coordinate
-uniform vec3 light_color; // light color
-float ndfGGX(float cosLh, float roughness)
-{
- float alpha = roughness * roughness;
- float alphaSq = alpha * alpha;
-
- float denom = (cosLh * cosLh) * (alphaSq - 1.0) + 1.0;
- return alphaSq / (PI * denom * denom);
-}
-
-// Single term for separable Schlick-GGX below.
-float gaSchlickG1(float cosTheta, float k)
-{
- return cosTheta / (cosTheta * (1.0 - k) + k);
-}
-
-// Schlick-GGX approximation of geometric attenuation function using Smith's method.
-float gaSchlickGGX(float cosLi, float cosLo, float roughness)
-{
- float r = roughness + 1.0;
- float k = (r * r) / 8.0; // Epic suggests using this roughness remapping for analytic lights.
- return gaSchlickG1(cosLi, k) * gaSchlickG1(cosLo, k);
-}
-
-// Shlick's approximation of the Fresnel factor.
-vec3 fresnelSchlick(vec3 F0, float cosTheta)
-{
- return F0 + (vec3(1.0) - F0) * pow(1.0 - cosTheta, 5.0);
-}
-
-void main() {
- vec3 lightDir = vec3(0,0,1);//normalize(light_position);
- //sunlight pointing to z direction
- float diff = 0.5 + 0.5 * max(dot(Normal_world, lightDir), 0.0);
- vec3 diffuse = diff * light_color;
- vec2 texelSize = 1.0 / textureSize(depthMap, 0);
-
- float shadow;
- if (shadow_pass == 2) {
- vec3 projCoords = FragPosLightSpace.xyz / FragPosLightSpace.w;
- projCoords = projCoords * 0.5 + 0.5;
-
- float cosTheta = dot(Normal_world, lightDir);
- cosTheta = clamp(cosTheta, 0.0, 1.0);
- float bias = 0.005*tan(acos(cosTheta));
- bias = clamp(bias, 0.001 ,0.1);
- float currentDepth = projCoords.z;
- float closestDepth = 0;
-
- shadow = 0.0;
- float current_shadow = 0;
-
- for(int x = -2; x <= 2; ++x)
- {
- for (int y = -2; y <= 2; ++y)
- {
- closestDepth = texture(depthMap, projCoords.xy + vec2(x, y) * texelSize).b * 0.5 + 0.5;
- current_shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0;
- if ((projCoords.z > 1.0) || (projCoords.x > 1.0) || (projCoords.y > 1.0)
- || (projCoords.x < 0) || (projCoords.y < 0)) current_shadow = 0.0;
- shadow += current_shadow;
- }
- }
- shadow /= 25.0;
- }
- else {
- shadow = 0.0;
- }
-
- //not using pbr
- if (use_pbr == 0) {
- if (use_texture == 1) {
- outputColour = texture(texUnit, theCoords);// albedo only
- } else {
- outputColour = vec4(Diffuse_color,1) * diff; //diffuse color
- }
- }
-
- //use pbr, not using mapping
- if ((use_pbr == 1) && (use_pbr_mapping == 0)) {
-
- vec3 N = normalize(Normal_world);
- vec3 Lo = normalize(eyePosition - FragPos);
- float cosLo = max(0.0, dot(N, Lo));
- vec3 Lr = 2.0 * cosLo * N - Lo;
-
- vec3 albedo;
- if (use_texture == 1) {
- albedo = texture(texUnit, theCoords).rgb;// albedo only
- } else {
- albedo = Diffuse_color; //diffuse color
- }
-
- const vec3 Fdielectric = vec3(0.04);
- vec3 F0 = mix(Fdielectric, albedo, metallic);
- vec3 irradiance = texture(irradianceTexture, N).rgb;
- vec3 F = fresnelSchlick(F0, cosLo);
- vec3 kd = mix(vec3(1.0) - F, vec3(0.0), metallic);
- vec3 diffuseIBL = kd * albedo * irradiance;
- int specularTextureLevels = textureQueryLevels(specularTexture);
- vec3 specularIrradiance = textureLod(specularTexture, Lr, roughness * specularTextureLevels).rgb;
- vec2 specularBRDF = texture(specularBRDF_LUT, vec2(cosLo, roughness)).rg;
- vec3 specularIBL = (F0 * specularBRDF.x + specularBRDF.y) * specularIrradiance;
- vec3 ambientLighting = diffuseIBL + specularIBL;
- //vec3 reflection = textureLod(specularTexture, vec3(Lr.x, Lr.z, Lr.y), 1).rgb;
- outputColour = vec4(ambientLighting, 1);
-
- }
-
- // use pbr and mapping
- if ((use_pbr == 1) && (use_pbr_mapping == 1)) {
- vec3 normal_map = 2 * texture(normalTexture, theCoords).rgb - 1;
- mat3 TBN2 = TBN;
- if (!gl_FrontFacing) {
- TBN2 = mat3(TBN[0], TBN[1], -TBN[2]);
- }
- vec3 N = normalize(TBN2 * normal_map);
-
- vec3 Lo = normalize(eyePosition - FragPos);
- float cosLo = max(0.0, dot(N, Lo));
- vec3 Lr = 2.0 * cosLo * N - Lo;
-
- vec3 albedo;
- if (use_texture == 1) {
- albedo = texture(texUnit, theCoords).rgb;// albedo only
- } else {
- albedo = Diffuse_color; //diffuse color
- }
-
- float metallic_sampled = texture(metallicTexture, theCoords).r;
- float roughness_sampled = texture(roughnessTexture, theCoords).r;
- const vec3 Fdielectric = vec3(0.04);
- vec3 F0 = mix(Fdielectric, albedo, metallic_sampled);
- vec3 irradiance;
-
- float modulate_factor;
-
- if (use_two_light_probe == 1) {
- vec4 room = texture(lightModulationMap, vec2((FragPos.x + 15.0)/30.0,
- (FragPos.y + 15.0)/30.0));
- modulate_factor = room.r;
- } else {
- modulate_factor = 1.0;
- }
- irradiance = texture(irradianceTexture, N).rgb * modulate_factor +
- (1-modulate_factor) * texture(irradianceTexture2, N).rgb;
- vec3 F = fresnelSchlick(F0, cosLo);
- vec3 kd = mix(vec3(1.0) - F, vec3(0.0), metallic_sampled);
- vec3 diffuseIBL = kd * albedo * irradiance;
- int specularTextureLevels = textureQueryLevels(specularTexture);
- int specularTextureLevels2 = textureQueryLevels(specularTexture2);
- vec3 specularIrradiance;
- vec2 specularBRDF;
- specularIrradiance = textureLod(specularTexture, Lr, roughness_sampled * specularTextureLevels).rgb *
- modulate_factor + textureLod(specularTexture2, Lr, roughness_sampled * specularTextureLevels2).rgb *
- (1-modulate_factor);
-
- specularBRDF = texture(specularBRDF_LUT, vec2(cosLo, roughness_sampled)).rg * modulate_factor +
- texture(specularBRDF_LUT2, vec2(cosLo, roughness_sampled)).rg * (1-modulate_factor);
- vec3 specularIBL = (F0 * specularBRDF.x + specularBRDF.y) * specularIrradiance;
- vec3 ambientLighting = diffuseIBL + specularIBL;
- //vec3 reflection = textureLod(specularTexture, vec3(Lr.x, Lr.z, Lr.y), 1).rgb;
- outputColour = vec4(ambientLighting, 1);
- }
-
- NormalColour = vec4((Normal_cam + 1) / 2,1);
- SemanticSegColour = vec4(Semantic_seg_color, 1);
- InstanceSegColour = vec4(Instance_seg_color, 1);
- if (shadow_pass == 1) {
- PCColour = vec4(Pos_cam_projected, 1);
- } else {
- PCColour = vec4(Pos_cam, 1);
- }
- outputColour = outputColour * (1 - shadow * 0.5);
- SceneFlowColour = vec4(Pos_cam - Pos_cam_prev,1);
- OpticalFlowColour = vec4(Optical_flow,0,1);
-
-}
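The deleted frag.shader combines a Schlick Fresnel term with split-sum image-based lighting. As a quick reference, the Fresnel approximation it uses looks like this in an illustrative Python sketch (not iGibson code):

import numpy as np

def fresnel_schlick(f0, cos_theta):
    # Schlick's approximation: F = F0 + (1 - F0) * (1 - cos_theta)^5
    f0 = np.asarray(f0, dtype=float)
    return f0 + (1.0 - f0) * (1.0 - cos_theta) ** 5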
diff --git a/igibson/render/mesh_renderer/shaders/460/irmap_cs.glsl b/igibson/render/mesh_renderer/shaders/460/irmap_cs.glsl
deleted file mode 100644
index 6e46e9ce2..000000000
--- a/igibson/render/mesh_renderer/shaders/460/irmap_cs.glsl
+++ /dev/null
@@ -1,106 +0,0 @@
-#version 460 core
-// Physically Based Rendering
-// Copyright (c) 2017-2018 Michał Siejak
-
-// Computes diffuse irradiance cubemap convolution for image-based lighting.
-// Uses quasi Monte Carlo sampling with Hammersley sequence.
-
-const float PI = 3.141592;
-const float TwoPI = 2 * PI;
-const float Epsilon = 0.00001;
-
-const uint NumSamples = 64 * 1024;
-const float InvNumSamples = 1.0 / float(NumSamples);
-
-layout(binding=0) uniform samplerCube inputTexture;
-layout(binding=0, rgba16f) restrict writeonly uniform imageCube outputTexture;
-
-// Compute Van der Corput radical inverse
-// See: http://holger.dammertz.org/stuff/notes_HammersleyOnHemisphere.html
-float radicalInverse_VdC(uint bits)
-{
- bits = (bits << 16u) | (bits >> 16u);
- bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
- bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
- bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
- bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
- return float(bits) * 2.3283064365386963e-10; // / 0x100000000
-}
-
-// Sample i-th point from Hammersley point set of NumSamples points total.
-vec2 sampleHammersley(uint i)
-{
- return vec2(i * InvNumSamples, radicalInverse_VdC(i));
-}
-
-// Uniformly sample point on a hemisphere.
-// Cosine-weighted sampling would be a better fit for Lambertian BRDF but since this
-// compute shader runs only once as a pre-processing step performance is not *that* important.
-// See: "Physically Based Rendering" 2nd ed., section 13.6.1.
-vec3 sampleHemisphere(float u1, float u2)
-{
- const float u1p = sqrt(max(0.0, 1.0 - u1*u1));
- return vec3(cos(TwoPI*u2) * u1p, sin(TwoPI*u2) * u1p, u1);
-}
-
-// Calculate normalized sampling direction vector based on current fragment coordinates (gl_GlobalInvocationID.xyz).
-// This is essentially "inverse-sampling": we reconstruct what the sampling vector would be if we wanted it to "hit"
-// this particular fragment in a cubemap.
-// See: OpenGL core profile specs, section 8.13.
-vec3 getSamplingVector()
-{
- vec2 st = gl_GlobalInvocationID.xy/vec2(imageSize(outputTexture));
- vec2 uv = 2.0 * vec2(st.x, 1.0-st.y) - vec2(1.0);
-
- vec3 ret;
- // Sadly 'switch' doesn't seem to work, at least on NVIDIA.
- if(gl_GlobalInvocationID.z == 0) ret = vec3(1.0, uv.y, -uv.x);
- else if(gl_GlobalInvocationID.z == 1) ret = vec3(-1.0, uv.y, uv.x);
- else if(gl_GlobalInvocationID.z == 2) ret = vec3(uv.x, 1.0, -uv.y);
- else if(gl_GlobalInvocationID.z == 3) ret = vec3(uv.x, -1.0, uv.y);
- else if(gl_GlobalInvocationID.z == 4) ret = vec3(uv.x, uv.y, 1.0);
- else if(gl_GlobalInvocationID.z == 5) ret = vec3(-uv.x, uv.y, -1.0);
- return normalize(ret);
-}
-
-// Compute orthonormal basis for converting from tanget/shading space to world space.
-void computeBasisVectors(const vec3 N, out vec3 S, out vec3 T)
-{
- // Branchless select non-degenerate T.
- T = cross(N, vec3(0.0, 1.0, 0.0));
- T = mix(cross(N, vec3(1.0, 0.0, 0.0)), T, step(Epsilon, dot(T, T)));
-
- T = normalize(T);
- S = normalize(cross(N, T));
-}
-
-// Convert point from tangent/shading space to world space.
-vec3 tangentToWorld(const vec3 v, const vec3 N, const vec3 S, const vec3 T)
-{
- return S * v.x + T * v.y + N * v.z;
-}
-
-layout(local_size_x=32, local_size_y=32, local_size_z=1) in;
-void main(void)
-{
- vec3 N = getSamplingVector();
-
- vec3 S, T;
- computeBasisVectors(N, S, T);
-
- // Monte Carlo integration of hemispherical irradiance.
- // As a small optimization this also includes Lambertian BRDF assuming perfectly white surface (albedo of 1.0)
- // so we don't need to normalize in PBR fragment shader (so technically it encodes exitant radiance rather than irradiance).
- vec3 irradiance = vec3(0);
- for(uint i=0; i closestDepth ? 1.0 : 0.0;
- if ((projCoords.z > 1.0) || (projCoords.x > 1.0) || (projCoords.y > 1.0)
- || (projCoords.x < 0) || (projCoords.y < 0)) current_shadow = 0.0;
- shadow += current_shadow;
- }
- }
- shadow /= 25.0;
- }
- else {
- shadow = 0.0;
- }
-
- if (use_pbr == 1) {
- int normal_tex_num = int(tex_normal_data[Draw_id].x);
- int normal_tex_layer = int(tex_normal_data[Draw_id].y);
- vec3 normal_map;
- if (normal_tex_num == -1) {
- normal_map = 2 * texture(defaultNormalTexture, theCoords).rgb - 1;
- } else if (normal_tex_num == 0) {
- normal_map = 2 * texture(bigTex, vec3(theCoords.x, theCoords.y, normal_tex_layer)).rgb - 1;
- } else {
- normal_map = 2 * texture(smallTex, vec3(theCoords.x, theCoords.y, normal_tex_layer)).rgb - 1;
- }
- vec3 N = normalize(TBN * normal_map);
- vec3 Lo = normalize(eyePosition - FragPos);
- float cosLo = max(0.0, dot(N, Lo));
- vec3 Lr = 2.0 * cosLo * N - Lo;
-
- vec3 albedo;
- if (tex_num == -1) {
- albedo = diffuse_colors[Draw_id].rgb;//diffuse color
- } else if (tex_num == 0) {
- albedo = texture(bigTex, vec3(theCoords.x, theCoords.y, tex_layer)).rgb;
- } else if (tex_num == 1) {
- albedo = texture(smallTex, vec3(theCoords.x, theCoords.y, tex_layer)).rgb;
- }
-
- int roughness_tex_num = int(tex_roughness_metallic_data[Draw_id].x);
- int roughness_tex_layer = int(tex_roughness_metallic_data[Draw_id].y);
- float roughness_sampled;
- if (roughness_tex_num == -1) {
- roughness_sampled = texture(defaultRoughnessTexture, theCoords).r;
- } else if (roughness_tex_num == 0) {
- roughness_sampled = texture(bigTex, vec3(theCoords.x, theCoords.y, roughness_tex_layer)).r;
- } else {
- roughness_sampled = texture(smallTex, vec3(theCoords.x, theCoords.y, roughness_tex_layer)).r;
- }
-
- int metallic_tex_num = int(tex_roughness_metallic_data[Draw_id].z);
- int metallic_tex_layer = int(tex_roughness_metallic_data[Draw_id].w);
- float metallic_sampled;
- if (metallic_tex_num == -1) {
- metallic_sampled = texture(defaultMetallicTexture, theCoords).r;
- } else if (metallic_tex_num == 0) {
- metallic_sampled = texture(bigTex, vec3(theCoords.x, theCoords.y, metallic_tex_layer)).r;
- } else {
- metallic_sampled = texture(smallTex, vec3(theCoords.x, theCoords.y, metallic_tex_layer)).r;
- }
-
- vec3 Fdielectric = vec3(0.04);
- vec3 F0 = mix(Fdielectric, albedo, metallic_sampled);
- vec3 irradiance = texture(irradianceTexture, N).rgb;
- vec3 F = fresnelSchlick(F0, cosLo);
- vec3 kd = mix(vec3(1.0) - F, vec3(0.0), metallic_sampled);
- vec3 diffuseIBL = kd * albedo * irradiance;
- int specularTextureLevels = textureQueryLevels(specularTexture);
- vec3 specularIrradiance = textureLod(specularTexture, Lr, roughness_sampled * specularTextureLevels).rgb;
- vec2 specularBRDF = texture(specularBRDF_LUT, vec2(cosLo, roughness_sampled)).rg;
- vec3 specularIBL = (F0 * specularBRDF.x + specularBRDF.y) * specularIrradiance;
- vec3 ambientLighting = diffuseIBL + specularIBL;
- //vec3 reflection = textureLod(specularTexture, vec3(Lr.x, Lr.z, Lr.y), 1).rgb;
- outputColour = vec4(ambientLighting, 1);
-
- }
- else {
- if (tex_num == -1) {
- outputColour = diffuse_colors[Draw_id] * diff; //diffuse color
- } else if (tex_num == 0) {
- outputColour = texture(bigTex, vec3(theCoords.x, theCoords.y, tex_layer));
- } else if (tex_num == 1) {
- outputColour = texture(smallTex, vec3(theCoords.x, theCoords.y, tex_layer));
- }
- }
- NormalColour = vec4((Normal_cam + 1) / 2,1);
- SemanticSegColour = vec4(semantic_seg_color, 0, 0, 1);
- InstanceSegColour = vec4(instance_seg_color, 0, 0, 1);
- if (shadow_pass == 1) {
- PCColour = vec4(Pos_cam_projected, 1);
- } else {
- PCColour = vec4(Pos_cam, 1);
- }
- outputColour = outputColour * (1 - shadow * 0.5);
-
- if (blend_highlight == 1) {
- outputColour = outputColour * (1-highlight * 0.5) + vec4(1, 0, 1, 1) * highlight * 0.5;
- }
- outputColour.w = highlight; // put highlight into alpha channel
-
- SceneFlowColour = vec4(Pos_cam - Pos_cam_prev,1);
- OpticalFlowColour = vec4(Optical_flow,0,1);
-}
diff --git a/igibson/render/mesh_renderer/shaders/460/optimized_vert.shader b/igibson/render/mesh_renderer/shaders/460/optimized_vert.shader
deleted file mode 100644
index b586559aa..000000000
--- a/igibson/render/mesh_renderer/shaders/460/optimized_vert.shader
+++ /dev/null
@@ -1,119 +0,0 @@
-#version 460
-
-#define MAX_ARRAY_SIZE 1024
-
-layout (std140) uniform TransformDataTrans {
- mat4 pose_trans_array[MAX_ARRAY_SIZE];
-};
-
-layout (std140) uniform TransformDataRot {
- mat4 pose_rot_array[MAX_ARRAY_SIZE];
-};
-
-layout (std140) uniform TransformDataLastTrans {
- mat4 pose_last_trans_array[MAX_ARRAY_SIZE];
-};
-
-layout (std140) uniform TransformDataLastRot {
- mat4 pose_last_rot_array[MAX_ARRAY_SIZE];
-};
-
-layout (std140) uniform Hidden {
- vec4 hidden_array[MAX_ARRAY_SIZE];
-};
-
-layout (std140) uniform UVData {
- vec4 uv_transform_param[MAX_ARRAY_SIZE];
-};
-
-
-uniform mat4 V;
-uniform mat4 last_V;
-uniform mat4 lightV;
-uniform mat4 P;
-uniform mat4 lightP;
-
-uniform vec3 semantic_seg_color;
-uniform vec3 instance_seg_color;
-uniform vec3 diffuse_color;
-uniform int shadow_pass;
-
-layout (location=0) in vec3 position;
-layout (location=1) in vec3 normal;
-layout (location=2) in vec2 texCoords;
-layout (location=3) in vec3 tangent;
-layout (location=4) in vec3 bitangent;
-
-out vec2 theCoords;
-out vec3 Normal_world;
-out vec3 FragPos;
-out vec3 Normal_cam;
-out vec3 Semantic_seg_color;
-out vec3 Instance_seg_color;
-out vec3 Pos_cam;
-out vec3 Pos_cam_prev;
-out vec3 Pos_cam_projected;
-out vec3 Diffuse_color;
-out mat3 TBN;
-flat out int Draw_id;
-out vec4 FragPosLightSpace;
-out vec2 Optical_flow;
-
-void main() {
- mat4 pose_trans = pose_trans_array[gl_DrawID];
- mat4 pose_rot = transpose(pose_rot_array[gl_DrawID]);
- mat4 last_trans = pose_last_trans_array[gl_DrawID];
- mat4 last_rot = transpose(pose_last_rot_array[gl_DrawID]);
- // Hidden is stored in the x element of the vector - y, z and w are currently unused and set to 1.0
- float hidden = hidden_array[gl_DrawID].x;
- vec4 world_position4;
- vec4 pos_cam4_projected;
- vec4 pos_cam4;
- vec4 Position_prev;
- vec4 pos_cam_prev4;
-
- if (shadow_pass == 1) {
- gl_Position = lightP * lightV * pose_trans * pose_rot * vec4(position, 1) * (1 - hidden);
- world_position4 = pose_trans * pose_rot * vec4(position, 1);
- FragPos = vec3(world_position4.xyz / world_position4.w);// in world coordinate
- Normal_world = normalize(mat3(pose_rot) * normal);// in world coordinate
- Normal_cam = normalize(mat3(lightV) * mat3(pose_rot) * normal);// in camera coordinate
- pos_cam4_projected = lightP * lightV * pose_trans * pose_rot * vec4(position, 1);
- pos_cam4 = lightV * pose_trans * pose_rot * vec4(position, 1);
-
- Optical_flow = vec2(0,0);
- Position_prev = vec4(0,0,0,0);
- pos_cam_prev4 = vec4(0,0,0,0);
- Pos_cam_prev = vec3(0,0,0);
- } else {
- gl_Position = P * V * pose_trans * pose_rot * vec4(position, 1) * (1 - hidden);
- Position_prev = P * last_V * last_trans * last_rot * vec4(position, 1);
- Optical_flow = gl_Position.xy/gl_Position.w - Position_prev.xy/Position_prev.w;
- world_position4 = pose_trans * pose_rot * vec4(position, 1);
- FragPos = vec3(world_position4.xyz / world_position4.w);// in world coordinate
- Normal_world = normalize(mat3(pose_rot) * normal);// in world coordinate
- Normal_cam = normalize(mat3(V) * mat3(pose_rot) * normal);// in camera coordinate
- pos_cam4_projected = P * V * pose_trans * pose_rot * vec4(position, 1);
- pos_cam4 = V * pose_trans * pose_rot * vec4(position, 1);
- pos_cam_prev4 = last_V * last_trans * last_rot * vec4(position, 1);
- Pos_cam_prev = pos_cam_prev4.xyz / pos_cam_prev4.w;
- }
-
- Pos_cam = pos_cam4.xyz / pos_cam4.w;
- Pos_cam_projected = pos_cam4_projected.xyz / pos_cam4_projected.w;
-
-
- theCoords.x = (cos(uv_transform_param[gl_DrawID][2]) * texCoords.x * uv_transform_param[gl_DrawID][0])
- - (sin(uv_transform_param[gl_DrawID][2]) * texCoords.y * uv_transform_param[gl_DrawID][1]);
- theCoords.y = (sin(uv_transform_param[gl_DrawID][2]) * texCoords.x * uv_transform_param[gl_DrawID][0])
- + (cos(uv_transform_param[gl_DrawID][2]) * texCoords.y * uv_transform_param[gl_DrawID][1]);
- Semantic_seg_color = semantic_seg_color;
- Instance_seg_color = instance_seg_color;
- Diffuse_color = diffuse_color;
- vec3 T = normalize(vec3(pose_trans * pose_rot * vec4(tangent, 0.0)));
- vec3 B = normalize(vec3(pose_trans * pose_rot * vec4(bitangent, 0.0)));
- vec3 N = normalize(vec3(pose_trans * pose_rot * vec4(normal, 0.0)));
- TBN = mat3(T, B, N);
- FragPosLightSpace = lightP * lightV * pose_trans * pose_rot * vec4(position, 1);
- Draw_id = gl_DrawID;
-}
diff --git a/igibson/render/mesh_renderer/shaders/460/spbrdf_cs.glsl b/igibson/render/mesh_renderer/shaders/460/spbrdf_cs.glsl
deleted file mode 100644
index cebbef1e5..000000000
--- a/igibson/render/mesh_renderer/shaders/460/spbrdf_cs.glsl
+++ /dev/null
@@ -1,108 +0,0 @@
-#version 460 core
-// Physically Based Rendering
-// Copyright (c) 2017-2018 Michał Siejak
-
-// Pre-integrates Cook-Torrance specular BRDF for varying roughness and viewing directions.
-// Results are saved into 2D LUT texture in the form of DFG1 and DFG2 split-sum approximation terms,
-// which act as a scale and bias to F0 (Fresnel reflectance at normal incidence) during rendering.
-
-const float PI = 3.141592;
-const float TwoPI = 2 * PI;
-const float Epsilon = 0.001; // This program needs larger eps.
-
-const uint NumSamples = 1024;
-const float InvNumSamples = 1.0 / float(NumSamples);
-
-layout(binding=0, rg16f) restrict writeonly uniform image2D LUT;
-
-// Compute Van der Corput radical inverse
-// See: http://holger.dammertz.org/stuff/notes_HammersleyOnHemisphere.html
-float radicalInverse_VdC(uint bits)
-{
- bits = (bits << 16u) | (bits >> 16u);
- bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
- bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
- bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
- bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
- return float(bits) * 2.3283064365386963e-10; // / 0x100000000
-}
-
-// Sample i-th point from Hammersley point set of NumSamples points total.
-vec2 sampleHammersley(uint i)
-{
- return vec2(i * InvNumSamples, radicalInverse_VdC(i));
-}
-
-// Importance sample GGX normal distribution function for a fixed roughness value.
-// This returns normalized half-vector between Li & Lo.
-// For derivation see: http://blog.tobias-franke.eu/2014/03/30/notes_on_importance_sampling.html
-vec3 sampleGGX(float u1, float u2, float roughness)
-{
- float alpha = roughness * roughness;
-
- float cosTheta = sqrt((1.0 - u2) / (1.0 + (alpha*alpha - 1.0) * u2));
- float sinTheta = sqrt(1.0 - cosTheta*cosTheta); // Trig. identity
- float phi = TwoPI * u1;
-
- // Convert to Cartesian upon return.
- return vec3(sinTheta * cos(phi), sinTheta * sin(phi), cosTheta);
-}
-
-// Single term for separable Schlick-GGX below.
-float gaSchlickG1(float cosTheta, float k)
-{
- return cosTheta / (cosTheta * (1.0 - k) + k);
-}
-
-// Schlick-GGX approximation of geometric attenuation function using Smith's method (IBL version).
-float gaSchlickGGX_IBL(float cosLi, float cosLo, float roughness)
-{
- float r = roughness;
- float k = (r * r) / 2.0; // Epic suggests using this roughness remapping for IBL lighting.
- return gaSchlickG1(cosLi, k) * gaSchlickG1(cosLo, k);
-}
-
-layout(local_size_x=32, local_size_y=32, local_size_z=1) in;
-void main(void)
-{
- // Get integration parameters.
- float cosLo = gl_GlobalInvocationID.x / float(imageSize(LUT).x);
- float roughness = gl_GlobalInvocationID.y / float(imageSize(LUT).y);
-
- // Make sure viewing angle is non-zero to avoid divisions by zero (and subsequently NaNs).
- cosLo = max(cosLo, Epsilon);
-
- // Derive tangent-space viewing vector from angle to normal (pointing towards +Z in this reference frame).
- vec3 Lo = vec3(sqrt(1.0 - cosLo*cosLo), 0.0, cosLo);
-
- // We will now pre-integrate Cook-Torrance BRDF for a solid white environment and save results into a 2D LUT.
- // DFG1 & DFG2 are terms of split-sum approximation of the reflectance integral.
- // For derivation see: "Moving Frostbite to Physically Based Rendering 3.0", SIGGRAPH 2014, section 4.9.2.
- float DFG1 = 0;
- float DFG2 = 0;
-
- for(uint i=0; i<NumSamples; ++i) {
- vec2 u = sampleHammersley(i);
- vec3 Lh = sampleGGX(u.x, u.y, roughness);
-
- // Compute incident direction (Li) by reflecting viewing direction (Lo) around half-vector (Lh).
- vec3 Li = 2.0 * dot(Lo, Lh) * Lh - Lo;
-
- float cosLi = Li.z;
- float cosLh = Lh.z;
- float cosLoLh = max(dot(Lo, Lh), 0.0);
-
- if(cosLi > 0.0) {
- float G = gaSchlickGGX_IBL(cosLi, cosLo, roughness);
- float Gv = G * cosLoLh / (cosLh * cosLo);
- float Fc = pow(1.0 - cosLoLh, 5);
-
- DFG1 += (1 - Fc) * Gv;
- DFG2 += Fc * Gv;
- }
- }
-
- imageStore(LUT, ivec2(gl_GlobalInvocationID), vec4(DFG1, DFG2, 0, 0) * InvNumSamples);
-}
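The deleted LUT shader integrates the BRDF with quasi Monte Carlo samples drawn from a Hammersley point set built on the Van der Corput radical inverse. A minimal Python sketch of that sequence (illustrative only, not iGibson code) is:

def radical_inverse_vdc(bits):
    # Mirror of the GLSL bit-reversal above, constrained to 32-bit unsigned arithmetic
    bits = ((bits << 16) | (bits >> 16)) & 0xFFFFFFFF
    bits = ((bits & 0x55555555) << 1) | ((bits & 0xAAAAAAAA) >> 1)
    bits = ((bits & 0x33333333) << 2) | ((bits & 0xCCCCCCCC) >> 2)
    bits = ((bits & 0x0F0F0F0F) << 4) | ((bits & 0xF0F0F0F0) >> 4)
    bits = ((bits & 0x00FF00FF) << 8) | ((bits & 0xFF00FF00) >> 8)
    return bits * 2.3283064365386963e-10  # divide by 2^32

def sample_hammersley(i, num_samples):
    # i-th point of a num_samples-point Hammersley set in [0, 1)^2
    return (i / num_samples, radical_inverse_vdc(i))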
diff --git a/igibson/render/mesh_renderer/shaders/460/spmap_cs.glsl b/igibson/render/mesh_renderer/shaders/460/spmap_cs.glsl
deleted file mode 100644
index f798329e5..000000000
--- a/igibson/render/mesh_renderer/shaders/460/spmap_cs.glsl
+++ /dev/null
@@ -1,164 +0,0 @@
-#version 460 core
-// Physically Based Rendering
-// Copyright (c) 2017-2018 Michał Siejak
-
-// Pre-filters environment cube map using GGX NDF importance sampling.
-// Part of specular IBL split-sum approximation.
-
-const float PI = 3.141592;
-const float TwoPI = 2 * PI;
-const float Epsilon = 0.00001;
-
-const uint NumSamples = 1024;
-const float InvNumSamples = 1.0 / float(NumSamples);
-
-// In OpenGL only a single mip level is bound.
-const int NumMipLevels = 1;
-layout(binding=0) uniform samplerCube inputTexture;
-layout(binding=0, rgba16f) restrict writeonly uniform imageCube outputTexture[NumMipLevels];
-
-// Roughness value to pre-filter for.
-layout(location=0) uniform float roughness;
-
-#define PARAM_LEVEL 0
-#define PARAM_ROUGHNESS roughness
-
-// Compute Van der Corput radical inverse
-// See: http://holger.dammertz.org/stuff/notes_HammersleyOnHemisphere.html
-float radicalInverse_VdC(uint bits)
-{
- bits = (bits << 16u) | (bits >> 16u);
- bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
- bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
- bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
- bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
- return float(bits) * 2.3283064365386963e-10; // / 0x100000000
-}
-
-// Sample i-th point from Hammersley point set of NumSamples points total.
-vec2 sampleHammersley(uint i)
-{
- return vec2(i * InvNumSamples, radicalInverse_VdC(i));
-}
-
-// Importance sample GGX normal distribution function for a fixed roughness value.
-// This returns normalized half-vector between Li & Lo.
-// For derivation see: http://blog.tobias-franke.eu/2014/03/30/notes_on_importance_sampling.html
-vec3 sampleGGX(float u1, float u2, float roughness)
-{
- float alpha = roughness * roughness;
-
- float cosTheta = sqrt((1.0 - u2) / (1.0 + (alpha*alpha - 1.0) * u2));
- float sinTheta = sqrt(1.0 - cosTheta*cosTheta); // Trig. identity
- float phi = TwoPI * u1;
-
- // Convert to Cartesian upon return.
- return vec3(sinTheta * cos(phi), sinTheta * sin(phi), cosTheta);
-}
-
-// GGX/Towbridge-Reitz normal distribution function.
-// Uses Disney's reparametrization of alpha = roughness^2.
-float ndfGGX(float cosLh, float roughness)
-{
- float alpha = roughness * roughness;
- float alphaSq = alpha * alpha;
-
- float denom = (cosLh * cosLh) * (alphaSq - 1.0) + 1.0;
- return alphaSq / (PI * denom * denom);
-}
-
-// Calculate normalized sampling direction vector based on current fragment coordinates (gl_GlobalInvocationID.xyz).
-// This is essentially "inverse-sampling": we reconstruct what the sampling vector would be if we wanted it to "hit"
-// this particular fragment in a cubemap.
-// See: OpenGL core profile specs, section 8.13.
-vec3 getSamplingVector()
-{
- vec2 st = gl_GlobalInvocationID.xy/vec2(imageSize(outputTexture[PARAM_LEVEL]));
- vec2 uv = 2.0 * vec2(st.x, 1.0-st.y) - vec2(1.0);
-
- vec3 ret;
- // Sadly 'switch' doesn't seem to work, at least on NVIDIA.
- if(gl_GlobalInvocationID.z == 0) ret = vec3(1.0, uv.y, -uv.x);
- else if(gl_GlobalInvocationID.z == 1) ret = vec3(-1.0, uv.y, uv.x);
- else if(gl_GlobalInvocationID.z == 2) ret = vec3(uv.x, 1.0, -uv.y);
- else if(gl_GlobalInvocationID.z == 3) ret = vec3(uv.x, -1.0, uv.y);
- else if(gl_GlobalInvocationID.z == 4) ret = vec3(uv.x, uv.y, 1.0);
- else if(gl_GlobalInvocationID.z == 5) ret = vec3(-uv.x, uv.y, -1.0);
- return normalize(ret);
-}
-
-// Compute orthonormal basis for converting from tanget/shading space to world space.
-void computeBasisVectors(const vec3 N, out vec3 S, out vec3 T)
-{
- // Branchless select non-degenerate T.
- T = cross(N, vec3(0.0, 1.0, 0.0));
- T = mix(cross(N, vec3(1.0, 0.0, 0.0)), T, step(Epsilon, dot(T, T)));
-
- T = normalize(T);
- S = normalize(cross(N, T));
-}
-
-// Convert point from tangent/shading space to world space.
-vec3 tangentToWorld(const vec3 v, const vec3 N, const vec3 S, const vec3 T)
-{
- return S * v.x + T * v.y + N * v.z;
-}
-
-layout(local_size_x=32, local_size_y=32, local_size_z=1) in;
-void main(void)
-{
- // Make sure we won't write past output when computing higher mipmap levels.
- ivec2 outputSize = imageSize(outputTexture[PARAM_LEVEL]);
- if(gl_GlobalInvocationID.x >= outputSize.x || gl_GlobalInvocationID.y >= outputSize.y) {
- return;
- }
-
- // Solid angle associated with a single cubemap texel at zero mipmap level.
- // This will come in handy for importance sampling below.
- vec2 inputSize = vec2(textureSize(inputTexture, 0));
- float wt = 4.0 * PI / (6 * inputSize.x * inputSize.y);
-
- // Approximation: Assume zero viewing angle (isotropic reflections).
- vec3 N = getSamplingVector();
- vec3 Lo = N;
-
- vec3 S, T;
- computeBasisVectors(N, S, T);
-
- vec3 color = vec3(0);
- float weight = 0;
-
- // Convolve environment map using GGX NDF importance sampling.
- // Weight by cosine term since Epic claims it generally improves quality.
- for(uint i=0; i<NumSamples; ++i) {
- vec2 u = sampleHammersley(i);
- vec3 Lh = tangentToWorld(sampleGGX(u.x, u.y, PARAM_ROUGHNESS), N, S, T);
-
- // Compute incident direction (Li) by reflecting viewing direction (Lo) around half-vector (Lh).
- vec3 Li = 2.0 * dot(Lo, Lh) * Lh - Lo;
-
- float cosLi = dot(N, Li);
- if(cosLi > 0.0) {
- // Use Mipmap Filtered Importance Sampling to improve convergence.
- // See: https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch20.html, section 20.4
-
- float cosLh = max(dot(N, Lh), 0.0);
-
- // GGX normal distribution function (D term) probability density function.
- // Scaling by 1/4 is due to change of density in terms of Lh to Li (and since N=V, rest of the scaling factor cancels out).
- float pdf = ndfGGX(cosLh, PARAM_ROUGHNESS) * 0.25;
-
- // Solid angle associated with this sample.
- float ws = 1.0 / (NumSamples * pdf);
-
- // Mip level to sample from.
- float mipLevel = max(0.5 * log2(ws / wt) + 1.0, 0.0);
-
- color += textureLod(inputTexture, Li, mipLevel).rgb * cosLi;
- weight += cosLi;
- }
- }
- color /= weight;
-
- imageStore(outputTexture[PARAM_LEVEL], ivec3(gl_GlobalInvocationID), vec4(color, 1.0));
-}
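For reference, the GGX normal distribution function that the deleted prefilter shader importance-samples can also be evaluated directly; an illustrative Python version (not iGibson code) is:

import math

def ndf_ggx(cos_lh, roughness):
    # GGX/Trowbridge-Reitz NDF with Disney's alpha = roughness^2 reparametrization
    alpha = roughness * roughness
    alpha_sq = alpha * alpha
    denom = (cos_lh * cos_lh) * (alpha_sq - 1.0) + 1.0
    return alpha_sq / (math.pi * denom * denom)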
diff --git a/igibson/render/mesh_renderer/shaders/460/text_frag.shader b/igibson/render/mesh_renderer/shaders/460/text_frag.shader
deleted file mode 100644
index e6957fbee..000000000
--- a/igibson/render/mesh_renderer/shaders/460/text_frag.shader
+++ /dev/null
@@ -1,23 +0,0 @@
-#version 460
-in vec2 TexCoords;
-layout(location = 0) out vec4 color;
-
-// Alpha value of the background - set to -1 if rendering text in foreground (default)
-uniform float backgroundAlpha = -1.0;
-uniform sampler2D text;
-uniform vec3 textColor;
-uniform vec3 backgroundColor;
-
-void main()
-{
- // Sample and color text
- if (backgroundAlpha < 0) {
- vec4 sampled = vec4(1.0, 1.0, 1.0, texture(text, TexCoords).r);
- color = vec4(textColor, 1.0) * sampled;
- }
- // Sample and color background
- else {
- vec4 sampled = vec4(1.0, 1.0, 1.0, backgroundAlpha);
- color = vec4(backgroundColor, 1.0) * sampled;
- }
-}
diff --git a/igibson/render/mesh_renderer/shaders/460/text_vert.shader b/igibson/render/mesh_renderer/shaders/460/text_vert.shader
deleted file mode 100644
index fc33af532..000000000
--- a/igibson/render/mesh_renderer/shaders/460/text_vert.shader
+++ /dev/null
@@ -1,13 +0,0 @@
-#version 460
-layout (location = 0) in vec4 vertex; // contains pos then tex as vec2
-out vec2 TexCoords;
-
-// Indicates whether this is the foreground or background (F for text, B for quad)
-uniform float background = 0.0;
-uniform mat4 projection;
-
-void main()
-{
- gl_Position = projection * vec4(vertex.xy, -0.1 * background, 1.0);
- TexCoords = vertex.zw;
-}
diff --git a/igibson/render/mesh_renderer/shaders/460/vert.shader b/igibson/render/mesh_renderer/shaders/460/vert.shader
deleted file mode 100644
index 00867e721..000000000
--- a/igibson/render/mesh_renderer/shaders/460/vert.shader
+++ /dev/null
@@ -1,85 +0,0 @@
-#version 460
-uniform mat4 V;
-uniform mat4 last_V;
-uniform mat4 lightV;
-uniform mat4 P;
-uniform mat4 lightP;
-uniform mat4 pose_rot;
-uniform mat4 pose_trans;
-uniform mat4 last_rot;
-uniform mat4 last_trans;
-uniform vec3 semantic_seg_color;
-uniform vec3 instance_seg_color;
-uniform vec3 diffuse_color;
-uniform vec3 uv_transform_param;
-uniform int shadow_pass;
-
-layout (location=0) in vec3 position;
-layout (location=1) in vec3 normal;
-layout (location=2) in vec2 texCoords;
-layout (location=3) in vec3 tangent;
-layout (location=4) in vec3 bitangent;
-
-out vec2 theCoords;
-out vec3 Normal_world;
-out vec3 FragPos;
-out vec3 Normal_cam;
-out vec3 Semantic_seg_color;
-out vec3 Instance_seg_color;
-out vec3 Pos_cam;
-out vec3 Pos_cam_projected;
-out vec3 Diffuse_color;
-out mat3 TBN;
-out vec4 FragPosLightSpace;
-out vec3 Pos_cam_prev;
-out vec2 Optical_flow;
-
-
-void main() {
- vec4 world_position4 = pose_trans * pose_rot * vec4(position, 1);
- FragPos = vec3(world_position4.xyz / world_position4.w);// in world coordinate
- Normal_world = normalize(mat3(pose_rot) * normal);// in world coordinate
- vec4 pos_cam4_projected;
- vec4 pos_cam4;
- vec4 Position_prev;
- vec4 pos_cam_prev4;
-
- if (shadow_pass == 1)
- {
- gl_Position = lightP * lightV * pose_trans * pose_rot * vec4(position, 1);
- Normal_cam = normalize(mat3(lightV) * mat3(pose_rot) * normal);// in camera coordinate
- pos_cam4_projected = lightP * lightV * pose_trans * pose_rot * vec4(position, 1);
- pos_cam4 = lightV * pose_trans * pose_rot * vec4(position, 1);
- // dummy values for shadow pass
- Optical_flow = vec2(0,0);
- Position_prev = vec4(0,0,0,0);
- pos_cam_prev4 = vec4(0,0,0,0);
- Pos_cam_prev = vec3(0,0,0);
- } else {
- gl_Position = P * V * pose_trans * pose_rot * vec4(position, 1);
- Normal_cam = normalize(mat3(V) * mat3(pose_rot) * normal);// in camera coordinate
- pos_cam4_projected = P * V * pose_trans * pose_rot * vec4(position, 1);
- pos_cam4 = V * pose_trans * pose_rot * vec4(position, 1);
- Position_prev = P * last_V * last_trans * last_rot * vec4(position, 1);
- pos_cam_prev4 = last_V * last_trans * last_rot * vec4(position, 1);
- Pos_cam_prev = pos_cam_prev4.xyz / pos_cam_prev4.w;
- Optical_flow = gl_Position.xy/gl_Position.w - Position_prev.xy/Position_prev.w;
- }
-
- Pos_cam = pos_cam4.xyz / pos_cam4.w;
- Pos_cam_projected = pos_cam4_projected.xyz / pos_cam4_projected.w;
-
- theCoords.x = (cos(uv_transform_param[2]) * texCoords.x * uv_transform_param[0])
- - (sin(uv_transform_param[2]) * texCoords.y * uv_transform_param[1]);
- theCoords.y = (sin(uv_transform_param[2]) * texCoords.x * uv_transform_param[0])
- + (cos(uv_transform_param[2]) * texCoords.y * uv_transform_param[1]);
-
- Semantic_seg_color = semantic_seg_color;
- Instance_seg_color = instance_seg_color;
- Diffuse_color = diffuse_color;
- vec3 T = normalize(vec3(pose_trans * pose_rot * vec4(tangent, 0.0)));
- vec3 B = normalize(vec3(pose_trans * pose_rot * vec4(bitangent, 0.0)));
- vec3 N = normalize(vec3(pose_trans * pose_rot * vec4(normal, 0.0)));
- TBN = mat3(T, B, N);
- FragPosLightSpace = lightP * lightV * pose_trans * pose_rot * vec4(position, 1);
-}
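Both deleted vertex shaders apply the same scale-then-rotate transform to the incoming texture coordinates. An equivalent NumPy sketch (illustrative only; parameter names are hypothetical) is:

import numpy as np

def transform_uv(u, v, scale_u, scale_v, angle):
    # Matches theCoords.x/.y in the deleted shaders: scale each axis, then rotate by angle
    c, s = np.cos(angle), np.sin(angle)
    return np.array([c * u * scale_u - s * v * scale_v,
                     s * u * scale_u + c * v * scale_v])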
diff --git a/igibson/render/viewer.py b/igibson/render/viewer.py
index 391b3e8ac..c28f09890 100644
--- a/igibson/render/viewer.py
+++ b/igibson/render/viewer.py
@@ -1,37 +1,36 @@
import logging
import os
import random
-import sys
import time
+from os.path import isfile, join
import cv2
import numpy as np
import pybullet as p
from igibson.objects.visual_marker import VisualMarker
-from igibson.utils.constants import ViewerMode
from igibson.utils.utils import rotate_vector_2d
class ViewerVR:
- def __init__(self, use_companion_window, frame_save_path=None, renderer=None):
+ def __init__(self, use_companion_window, frame_save_path=None):
"""
:param use_companion_window: whether to render companion window (passed in automatically from VrSettings)
"""
- self.renderer = renderer
+ self.renderer = None
self.use_companion_window = use_companion_window
self.frame_save_path = frame_save_path
self.frame_counter = 0
self.frame_save_video_handler = None
- def update(self):
+ def update(self, save_video=False, task_name=""):
"""
Updates viewer.
"""
if not self.renderer:
raise RuntimeError("Unable to render without a renderer attached to the ViewerVR!")
if self.frame_save_path:
- frame = cv2.cvtColor(self.renderer.render(return_buffer=True)[0], cv2.COLOR_RGB2BGR)
+ frame = cv2.cvtColor(self.renderer.render(return_frame=True)[0], cv2.COLOR_RGB2BGR)
frame = (frame * 255).astype(np.uint8)
# Save as a video
if self.frame_save_path.endswith(".mp4"):
@@ -66,24 +65,47 @@ def __init__(
):
self.renderer = renderer
self.simulator = simulator
+ self.recording = False
+ self.task = ""
cv2.namedWindow("RobotView")
cv2.moveWindow("RobotView", 0, 0)
- def update(self):
+ def update(self, save_video=False, task_name=""):
+ self.task = task_name
+ if save_video and not self.recording:
+ logging.info("Start recording*****************************")
+ # Current time string to use to save the temporal urdfs
+ timestr = time.strftime("%Y%m%d-%H%M%S")
+ # Create the subfolder
+ self.video_folder = os.path.join(
+ os.getcwd() + "/video_frames", "{}_{}_{}".format(timestr, "robotview", task_name)
+ )
+ logging.info(self.video_folder)
+ os.makedirs(self.video_folder, exist_ok=True)
+ self.recording = True
+ self.frame_idx = 0
+
if not self.renderer is None:
frames = self.renderer.render_robot_cameras(modes=("rgb"))
if len(frames) > 0:
frame = cv2.cvtColor(np.concatenate(frames, axis=1), cv2.COLOR_RGB2BGR)
cv2.imshow("RobotView", frame)
+ if self.recording:
+ cv2.imwrite(
+ os.path.join(self.video_folder, "{:05d}.png".format(self.frame_idx)), (frame * 255).astype(np.uint8)
+ )
+ self.frame_idx += 1
cv2.waitKey(1)
+ def make_video(self, task_name=""):
+ convert_frames_to_video(self.video_folder+"/", task_name + "_first_person_pov.mp4", 25.0)
class Viewer:
def __init__(
self,
- initial_pos=[6.6, 5.6, 3.4], # [0, 0, 1.2],
- initial_view_direction=[-0.7, -0.6, -0.4], # [1, 0, 0],
+ initial_pos=[0, 0, 1.2],
+ initial_view_direction=[1, 0, 0],
initial_up=[0, 0, 1],
simulator=None,
renderer=None,
@@ -99,10 +121,21 @@ def __init__(
:param renderer: iGibson renderer
:param min_cam_z: minimum camera z
"""
- self.initial_pos = initial_pos
- self.initial_view_direction = initial_view_direction
- self.reset_viewer()
+ self.px = initial_pos[0]
+ self.py = initial_pos[1]
+ self.pz = initial_pos[2]
+ self.theta = np.arctan2(initial_view_direction[1], initial_view_direction[0])
+ self.phi = np.arctan2(
+ initial_view_direction[2], np.sqrt(initial_view_direction[0] ** 2 + initial_view_direction[1] ** 2)
+ )
self.min_cam_z = min_cam_z
+ self.show_help = 0
+
+ self._mouse_ix, self._mouse_iy = -1, -1
+ self.left_down = False
+ self.middle_down = False
+ self.right_down = False
+ self.view_direction = np.array(initial_view_direction)
self.up = initial_up
self.renderer = renderer
self.simulator = simulator
@@ -111,22 +144,23 @@ def __init__(
# Flag to control if the mouse interface is in navigation, manipulation
# or motion planning/execution mode
- self.mode = ViewerMode.NAVIGATION
+ self.manipulation_mode = 0
# Video recording
self.recording = False # Boolean if we are recording frames from the viewer
self.pause_recording = False # Flag to pause/resume recording
self.video_folder = ""
+ self.task = ""
# in case of robosuite viewer, we open only one window.
# Later use the numpad to activate additional cameras
self.is_robosuite = self.renderer.rendering_settings.is_robosuite
- cv2.namedWindow("Viewer")
- cv2.moveWindow("Viewer", 0, 0)
+ cv2.namedWindow("ExternalView")
+ cv2.moveWindow("ExternalView", 0, 0)
if not self.is_robosuite:
cv2.namedWindow("RobotView")
- cv2.setMouseCallback("Viewer", self.mouse_callback)
+ cv2.setMouseCallback("ExternalView", self.mouse_callback)
self.create_visual_object()
self.planner = None
self.block_command = False
@@ -145,18 +179,14 @@ def create_visual_object(self):
"""
self.constraint_marker = VisualMarker(radius=0.04, rgba_color=[0, 0, 1, 1])
self.constraint_marker2 = VisualMarker(
- visual_shape=p.GEOM_CAPSULE,
- radius=0.01,
- length=3,
- initial_offset=[0, 0, -1.5],
- rgba_color=[0, 0, 1, 1],
+ visual_shape=p.GEOM_CAPSULE, radius=0.01, length=3, initial_offset=[0, 0, -1.5], rgba_color=[0, 0, 1, 1]
)
# Simulation is done by MuJoCo when rendering robosuite envs
if not self.is_robosuite:
if self.simulator is not None:
- self.simulator.import_object(self.constraint_marker2)
- self.simulator.import_object(self.constraint_marker)
+ self.simulator.import_object(self.constraint_marker2, use_pbr=False)
+ self.simulator.import_object(self.constraint_marker, use_pbr=False)
self.constraint_marker.set_position([0, 0, -1])
self.constraint_marker2.set_position([0, 0, -1])
@@ -206,10 +236,10 @@ def create_constraint(self, x, y, fixed=False):
:param y: image pixel y coordinate
:param fixed: whether to create a fixed joint. Otherwise, it's a point2point joint.
"""
- camera_position = np.array([self.px, self.py, self.pz])
- self.renderer.set_camera(camera_position, camera_position + self.view_direction, self.up)
+ camera_pose = np.array([self.px, self.py, self.pz])
+ self.renderer.set_camera(camera_pose, camera_pose + self.view_direction, self.up)
- clicked_point_in_cf = np.array(
+ position_cam = np.array(
[
(x - self.renderer.width / 2)
/ float(self.renderer.width / 2)
@@ -217,14 +247,15 @@ def create_constraint(self, x, y, fixed=False):
-(y - self.renderer.height / 2)
/ float(self.renderer.height / 2)
* np.tan(self.renderer.vertical_fov / 2.0 / 180.0 * np.pi),
- -1, # z axis is pointing into the camera in OpenGL convention
+ -1,
1,
]
)
- clicked_point_in_cf[:3] *= 5
+ position_cam[:3] *= 5
- clicked_point_in_wf = np.linalg.inv(self.renderer.V).dot(clicked_point_in_cf)
- res = p.rayTest(camera_position, clicked_point_in_wf[:3])
+ position_world = np.linalg.inv(self.renderer.V).dot(position_cam)
+ position_eye = camera_pose
+ res = p.rayTest(position_eye, position_world[:3])
if len(res) > 0 and res[0][0] != -1:
object_id, link_id, _, hit_pos, hit_normal = res[0]
p.changeDynamics(object_id, -1, activationState=p.ACTIVATION_STATE_WAKE_UP)
@@ -240,9 +271,9 @@ def create_constraint(self, x, y, fixed=False):
)
self.constraint_marker.set_position(hit_pos)
self.constraint_marker2.set_position(hit_pos)
- self.dist = np.linalg.norm(np.array(hit_pos) - camera_position)
+ self.dist = np.linalg.norm(np.array(hit_pos) - camera_pose)
cid = p.createConstraint(
- parentBodyUniqueId=self.constraint_marker.get_body_id(),
+ parentBodyUniqueId=self.constraint_marker.body_id,
parentLinkIndex=-1,
childBodyUniqueId=object_id,
childLinkIndex=link_id,
@@ -408,7 +439,7 @@ def mouse_callback(self, event, x, y, flags, params):
"""
# Navigation mode
- if self.mode == ViewerMode.NAVIGATION:
+ if self.manipulation_mode == 0:
# Only once, when pressing left mouse while ctrl key is pressed
if flags == cv2.EVENT_FLAG_LBUTTON + cv2.EVENT_FLAG_CTRLKEY and not self.right_down:
self._mouse_ix, self._mouse_iy = x, y
@@ -462,7 +493,7 @@ def mouse_callback(self, event, x, y, flags, params):
# if middle button was pressed we get closer/further away in the viewing direction
elif self.middle_down:
- d_vd = (y - self._mouse_iy) / 25.0
+ d_vd = (y - self._mouse_iy) / 100.0
self._mouse_iy = y
motion_along_vd = d_vd * self.view_direction
@@ -488,7 +519,7 @@ def mouse_callback(self, event, x, y, flags, params):
self.pz = max(self.min_cam_z, self.pz)
# Manipulation mode
- elif self.mode == ViewerMode.MANIPULATION:
+ elif self.manipulation_mode == 1:
# Middle mouse button press or only once, when pressing left mouse
# while shift key is pressed (Mac compatibility)
if (event == cv2.EVENT_MBUTTONDOWN) or (
@@ -521,7 +552,7 @@ def mouse_callback(self, event, x, y, flags, params):
self.move_constraint_z(dy)
# Motion planning / execution mode
- elif self.mode == ViewerMode.PLANNING and not self.block_command:
+ elif self.manipulation_mode == 2 and not self.block_command:
# left mouse button press
if event == cv2.EVENT_LBUTTONDOWN:
self._mouse_ix, self._mouse_iy = x, y
@@ -561,94 +592,68 @@ def show_help_text(self, frame):
"""
Show help text
"""
- if self.show_help % 3 == 0:
+ if self.show_help < 0:
return
- elif self.show_help % 3 == 1:
+
+ if self.show_help >= 150:
first_color = (255, 0, 0)
help_text = "Keyboard cheatsheet:"
cv2.putText(frame, help_text, (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
- help_text = "'w','s','a','d','t','g': forward, backwards, left, right, up, down (any mode)"
+ help_text = "'w','a','s','d': up, left, down, right (any mode)"
cv2.putText(frame, help_text, (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "'q','e': turn left, turn right (any mode)"
cv2.putText(frame, help_text, (10, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "'m': switch control mode across navigation, manipulation, and planning"
cv2.putText(frame, help_text, (10, 140), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "'r': Start/stop recording frames (results in \\tmp folder)"
cv2.putText(frame, help_text, (10, 160), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "'p': Pause/resume recording"
cv2.putText(frame, help_text, (10, 180), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
- help_text = "'z': Save new resetting viewing pose"
+ help_text = "'h': Show this help on screen"
cv2.putText(frame, help_text, (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
- help_text = "'x': Reset viewer to the saved viewing pose"
- cv2.putText(frame, help_text, (10, 220), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
- help_text = "'h': Show this help on screen (1 of 2)"
- cv2.putText(frame, help_text, (10, 240), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "'ESC': Quit"
- cv2.putText(frame, help_text, (10, 260), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
- print(help_text)
- elif self.show_help % 3 == 2:
+ cv2.putText(frame, help_text, (10, 220), cv2.FONT_HERSHEY_SIMPLEX, 0.5, first_color, 1, cv2.LINE_AA)
+ else:
second_color = (255, 0, 255)
help_text = "Mouse control in navigation mode:"
cv2.putText(frame, help_text, (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "Left click and drag: rotate camera"
cv2.putText(frame, help_text, (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "CTRL + left click and drag: translate camera"
cv2.putText(frame, help_text, (10, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "Middle click and drag (linux) or left SHIFT + left click and drag:"
cv2.putText(frame, help_text, (10, 140), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "translate camera closer/further away in the viewing direction"
cv2.putText(frame, help_text, (10, 160), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "Mouse control in manipulation mode:"
cv2.putText(frame, help_text, (10, 180), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "Left click and drag: create ball-joint connection to clicked object and move it"
cv2.putText(frame, help_text, (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "Middle click and drag (linux) or left SHIFT + left click and drag: create rigid connection"
cv2.putText(frame, help_text, (10, 220), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = " to object and move it"
cv2.putText(frame, help_text, (10, 240), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "CTRL + click and drag: up/down of the mouse moves object further/closer"
cv2.putText(frame, help_text, (10, 260), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "Mouse control in planning mode:"
cv2.putText(frame, help_text, (10, 280), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = (
"Left click: create (click), visualize (drag) and plan / execute (release) a base motion subgoal"
)
cv2.putText(frame, help_text, (10, 300), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "for the robot base to reach the physical point that corresponds to the clicked pixel"
cv2.putText(frame, help_text, (10, 320), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "Middle click: create, and plan / execute an arm motion subgoal"
cv2.putText(frame, help_text, (10, 340), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
help_text = "for the robot end-effector to reach the physical point that corresponds to the clicked pixel"
cv2.putText(frame, help_text, (10, 360), cv2.FONT_HERSHEY_SIMPLEX, 0.5, second_color, 1, cv2.LINE_AA)
- print(help_text)
+ self.show_help -= 1
- def update(self):
+ def update(self, save_video=False, task_name=""):
"""
Update images of Viewer
"""
+ self.task = task_name
camera_pose = np.array([self.px, self.py, self.pz])
if self.renderer is not None:
self.renderer.set_camera(camera_pose, camera_pose + self.view_direction, self.up)
@@ -682,7 +687,7 @@ def update(self):
)
cv2.putText(
frame,
- ["nav mode", "manip mode", "planning mode"][self.mode],
+ ["nav mode", "manip mode", "planning mode"][self.manipulation_mode],
(10, 60),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
@@ -692,18 +697,17 @@ def update(self):
)
self.show_help_text(frame)
- cv2.imshow("Viewer", frame)
+ cv2.imshow("ExternalView", frame)
# We keep some duplicated functionality for backwards compatibility
q = cv2.waitKey(1)
move_vec = self.view_direction[:2]
- # step size is 0.1m
- step_size = 0.1
- move_vec = move_vec / np.linalg.norm(move_vec) * step_size
+ # step size is 0.05m
+ move_vec = move_vec / np.linalg.norm(move_vec) * 0.05
# show help text
if q == ord("h"):
- self.show_help += 1
+ self.show_help = 300
# move
elif q in [ord("w"), ord("s"), ord("a"), ord("d")]:
@@ -718,22 +722,16 @@ def update(self):
move_vec = rotate_vector_2d(move_vec, yaw)
self.px += move_vec[0]
self.py += move_vec[1]
- if self.mode == ViewerMode.MANIPULATION:
+ if self.manipulation_mode == 1:
self.move_constraint(self._mouse_ix, self._mouse_iy)
- elif q in [ord("t")]:
- self.pz += step_size
-
- elif q in [ord("g")]:
- self.pz -= step_size
-
# turn left
elif q == ord("q"):
self.theta += np.pi / 36
self.view_direction = np.array(
[np.cos(self.theta) * np.cos(self.phi), np.sin(self.theta) * np.cos(self.phi), np.sin(self.phi)]
)
- if self.mode == ViewerMode.MANIPULATION:
+ if self.manipulation_mode == 1:
self.move_constraint(self._mouse_ix, self._mouse_iy)
# turn right
@@ -742,7 +740,7 @@ def update(self):
self.view_direction = np.array(
[np.cos(self.theta) * np.cos(self.phi), np.sin(self.theta) * np.cos(self.phi), np.sin(self.phi)]
)
- if self.mode == ViewerMode.MANIPULATION:
+ if self.manipulation_mode == 1:
self.move_constraint(self._mouse_ix, self._mouse_iy)
# quit (Esc)
@@ -754,22 +752,29 @@ def update(self):
)
logging.info("ffmpeg -i %5d.png -y -c:a copy -c:v libx264 -crf 18 -preset veryslow -r 30 video.mp4")
logging.info("The last folder you collected images for a video was: " + self.video_folder)
- sys.exit()
+ exit()
# Start/Stop recording. Stopping saves frames to files
- elif q == ord("r"):
+ elif (q == ord("r") or save_video):
+ save_video = False
if self.recording:
self.recording = False
self.pause_recording = False
else:
logging.info("Start recording*****************************")
+ logging.info("Press 'r' to stop recording and 'p' to pause/resume recording.")
# Current time string to use to save the temporal urdfs
timestr = time.strftime("%Y%m%d-%H%M%S")
- # Create the subfolder
- self.video_folder = os.path.join(
- "/tmp", "{}_{}_{}".format(timestr, random.getrandbits(64), os.getpid())
+ self.robotview_folder = os.path.join(
+ os.getcwd() + "/video_frames", "{}_{}_{}".format(timestr, "robotview", task_name)
)
- os.makedirs(self.video_folder, exist_ok=True)
+ self.externalview_folder = os.path.join(
+ os.getcwd() + "/video_frames", "{}_{}_{}".format(timestr, "externalview", task_name)
+ )
+ logging.info("robotview folder: " + self.robotview_folder)
+ logging.info("externalview folder: " + self.externalview_folder)
+ os.makedirs(self.robotview_folder, exist_ok=True)
+ os.makedirs(self.externalview_folder, exist_ok=True)
self.recording = True
self.frame_idx = 0
@@ -785,19 +790,7 @@ def update(self):
self.left_down = False
self.middle_down = False
self.right_down = False
- if self.planner is not None:
- self.mode = (self.mode + 1) % len(ViewerMode)
- else:
- # Disable planning mode if planner not initialized (assume planning mode is the last available mode)
- assert ViewerMode.PLANNING == len(ViewerMode) - 1, "Planning mode is not the last available viewer mode"
- self.mode = (self.mode + 1) % (len(ViewerMode) - 1)
-
- elif q == ord("z"):
- self.initial_pos = [self.px, self.py, self.pz]
- self.initial_view_direction = self.view_direction
-
- elif q == ord("x"):
- self.reset_viewer()
+ self.manipulation_mode = (self.manipulation_mode + 1) % 3
elif (
self.is_robosuite
@@ -815,7 +808,7 @@ def update(self):
if self.recording and not self.pause_recording:
cv2.imwrite(
- os.path.join(self.video_folder, "{:05d}.png".format(self.frame_idx)), (frame * 255).astype(np.uint8)
+ os.path.join(self.externalview_folder, "{:05d}.png".format(self.frame_idx)), (frame * 255).astype(np.uint8)
)
self.frame_idx += 1
@@ -833,25 +826,42 @@ def update(self):
if len(frames) > 0:
frame = cv2.cvtColor(np.concatenate(frames, axis=1), cv2.COLOR_RGB2BGR)
cv2.imshow("RobotView", frame)
-
- def reset_viewer(self):
- self.px = self.initial_pos[0]
- self.py = self.initial_pos[1]
- self.pz = self.initial_pos[2]
- self.initial_view_direction /= np.linalg.norm(self.initial_view_direction)
- self.theta = np.arctan2(self.initial_view_direction[1], self.initial_view_direction[0])
- self.phi = np.arctan2(
- self.initial_view_direction[2],
- np.sqrt(self.initial_view_direction[0] ** 2 + self.initial_view_direction[1] ** 2),
- )
- self.show_help = 0
-
- self._mouse_ix, self._mouse_iy = -1, -1
- self.left_down = False
- self.middle_down = False
- self.right_down = False
- self.view_direction = np.array(self.initial_view_direction)
-
+ if self.recording:
+ cv2.imwrite(
+ os.path.join(self.robotview_folder, "{:05d}.png".format(self.frame_idx)), (frame * 255).astype(np.uint8)
+ )
+ self.frame_idx += 1
+
+ self.initialize = False
+
+ def make_video(self, task_name=""):
+ convert_frames_to_video(self.robotview_folder+"/", task_name + "_first_person_pov.mp4", 25.0)
+ convert_frames_to_video(self.externalview_folder+"/", task_name + "_third_person_pov.mp4", 25.0)
+
+def convert_frames_to_video(pathIn,pathOut,fps):
+ """Makes mp4 video from frames collected from recording."""
+ frame_array = []
+ files = [f for f in os.listdir(pathIn) if isfile(join(pathIn, f))]
+ #for sorting the file names properly
+ files.sort(key = lambda x: int(x[-9:-4]))
+ img1_file = pathIn + files[0]
+ img1 = cv2.imread(img1_file)
+ height, width, layers = img1.shape
+ size = (width, height)
+
+ for i in range(len(files)):
+ filename=pathIn + files[i]
+ #reading each file
+ img = cv2.imread(filename)
+ height, width, layers = img.shape
+ size = (width,height)
+ #inserting the frames into an image array
+ frame_array.append(img)
+ out = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'MP4V'), fps, size)
+ for i in range(len(frame_array)):
+ # writing each frame to the output video
+ out.write(frame_array[i])
+ out.release()
if __name__ == "__main__":
viewer = Viewer()
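A minimal usage sketch of the convert_frames_to_video helper added above; the folder path and task name are hypothetical, and the frames are assumed to be saved as 00000.png, 00001.png, ... as the recording code does:

from igibson.render.viewer import convert_frames_to_video

# Hypothetical folder produced by pressing 'r' in the viewer; note the trailing slash,
# since the helper joins paths by simple string concatenation.
convert_frames_to_video("video_frames/20220101-120000_externalview_demo/", "demo_third_person_pov.mp4", 25.0)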
diff --git a/igibson/reward_functions/reward_function_base.py b/igibson/reward_functions/reward_function_base.py
index 5fd298b5e..b36e9d21f 100644
--- a/igibson/reward_functions/reward_function_base.py
+++ b/igibson/reward_functions/reward_function_base.py
@@ -32,12 +32,3 @@ def get_reward(self, task, env):
:return: reward, info
"""
raise NotImplementedError()
-
- def reset(self, task, env):
- """
- Reward function-specific reset
-
- :param task: task instance
- :param env: environment instance
- """
- return
diff --git a/igibson/robots/__init__.py b/igibson/robots/__init__.py
index ee9d3e62e..e69de29bb 100644
--- a/igibson/robots/__init__.py
+++ b/igibson/robots/__init__.py
@@ -1,16 +0,0 @@
-from igibson.robots.active_camera_robot import ActiveCameraRobot
-from igibson.robots.ant import Ant
-from igibson.robots.behavior_robot import BehaviorRobot
-from igibson.robots.fetch import Fetch
-from igibson.robots.freight import Freight
-from igibson.robots.husky import Husky
-from igibson.robots.jr2 import JR2
-from igibson.robots.locobot import Locobot
-from igibson.robots.locomotion_robot import LocomotionRobot
-from igibson.robots.manipulation_robot import ManipulationRobot
-from igibson.robots.robot_base import REGISTERED_ROBOTS, BaseRobot
-from igibson.robots.turtlebot import Turtlebot
-from igibson.robots.two_wheel_robot import TwoWheelRobot
-
-# TODO: Remove once BehaviorRobot extends from proper BaseRobot class
-REGISTERED_ROBOTS["BehaviorRobot"] = BehaviorRobot
diff --git a/igibson/robots/active_camera_robot.py b/igibson/robots/active_camera_robot.py
deleted file mode 100644
index 5c4a44547..000000000
--- a/igibson/robots/active_camera_robot.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from abc import abstractmethod
-
-import gym
-import numpy as np
-from transforms3d.euler import euler2quat
-from transforms3d.quaternions import qmult, quat2mat
-
-from igibson.controllers import LocomotionController
-from igibson.robots.robot_base import BaseRobot
-from igibson.utils.python_utils import assert_valid_key
-
-
-class ActiveCameraRobot(BaseRobot):
- """
- Robot that is is equipped with an onboard camera that can be moved independently from the robot's other kinematic
- joints (e.g.: independent of base and arm for a mobile manipulator).
-
- NOTE: controller_config should, at the minimum, contain:
- camera: controller specifications for the controller to control this robot's camera.
- Should include:
-
- - name: Controller to create
- - relevant to the controller being created. Note that all values will have default
- values specified, but setting these individual kwargs will override them
-
- """
-
- def _validate_configuration(self):
- # Make sure a camera controller is specified
- assert (
- "camera" in self._controllers
- ), "Controller 'camera' must exist in controllers! Current controllers: {}".format(
- list(self._controllers.keys())
- )
-
- # run super
- super()._validate_configuration()
-
- def _get_proprioception_dict(self):
- dic = super()._get_proprioception_dict()
-
- # Add camera pos info
- dic["camera_qpos"] = self.joint_positions[self.camera_control_idx]
- dic["camera_qpos_sin"] = np.sin(self.joint_positions[self.camera_control_idx])
- dic["camera_qpos_cos"] = np.cos(self.joint_positions[self.camera_control_idx])
- dic["camera_qvel"] = self.joint_velocities[self.camera_control_idx]
-
- return dic
-
- @property
- def default_proprio_obs(self):
- obs_keys = super().default_proprio_obs
- return obs_keys + ["camera_qpos_sin", "camera_qpos_cos"]
-
- @property
- def controller_order(self):
- # By default, only camera is supported
- return ["camera"]
-
- @property
- def _default_controllers(self):
- # Always call super first
- controllers = super()._default_controllers
-
- # For best generalizability use, joint controller as default
- controllers["camera"] = "JointController"
-
- return controllers
-
- @property
- def _default_camera_joint_controller_config(self):
- """
- :return: Dict[str, Any] Default camera joint controller config to control this robot's camera
- """
- return {
- "name": "JointController",
- "control_freq": self.control_freq,
- "motor_type": "velocity",
- "control_limits": self.control_limits,
- "joint_idx": self.camera_control_idx,
- "command_output_limits": "default",
- "use_delta_commands": False,
- "use_compliant_mode": True,
- }
-
- @property
- def _default_controller_config(self):
- # Always run super method first
- cfg = super()._default_controller_config
-
- # We additionally add in camera default
- cfg["camera"] = {
- self._default_camera_joint_controller_config["name"]: self._default_camera_joint_controller_config,
- }
-
- return cfg
-
- @property
- @abstractmethod
- def camera_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to camera joints.
- """
- raise NotImplementedError
diff --git a/igibson/robots/ant.py b/igibson/robots/ant.py
deleted file mode 100644
index e9e9a3339..000000000
--- a/igibson/robots/ant.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os
-
-import numpy as np
-
-import igibson
-from igibson.robots.locomotion_robot import LocomotionRobot
-
-
-class Ant(LocomotionRobot):
- """
- OpenAI Ant Robot
- """
-
- def _create_discrete_action_space(self):
- raise ValueError("Ant does not support discrete actions!")
-
- @property
- def base_control_idx(self):
- return np.array([0, 1, 2, 3, 4, 5, 6, 7])
-
- @property
- def default_joint_pos(self):
- return np.zeros(self.n_joints)
-
- @property
- def model_file(self):
- return os.path.join(igibson.assets_path, "models/ant/ant.xml")
diff --git a/igibson/robots/ant_robot.py b/igibson/robots/ant_robot.py
new file mode 100644
index 000000000..ac2fa11c1
--- /dev/null
+++ b/igibson/robots/ant_robot.py
@@ -0,0 +1,38 @@
+import gym
+import numpy as np
+
+from igibson.robots.robot_locomotor import LocomotorRobot
+
+
+class Ant(LocomotorRobot):
+ """
+ OpenAI Ant Robot
+ Uses joint torque control
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.torque = config.get("torque", 1.0)
+ LocomotorRobot.__init__(
+ self,
+ "ant/ant.xml",
+ action_dim=8,
+ torque_coef=2.5,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", False),
+ control="torque",
+ )
+
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
+ self.action_high = self.torque * np.ones([self.action_dim])
+ self.action_low = -self.action_high
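+ # NOTE (assumption, not part of the original change): the Box space is normalized to [-1, 1];
+ # action_low/high record the torque limits that the LocomotorRobot base class is expected to use
+ # when scaling normalized commands before applying joint torques.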
+
+ def set_up_discrete_action_space(self):
+ """
+ Set up discrete action space
+ """
+ assert False, "Ant does not support discrete actions"
diff --git a/igibson/robots/behavior_robot.py b/igibson/robots/behavior_robot.py
index 0ae7ffbcf..39a9dbff7 100644
--- a/igibson/robots/behavior_robot.py
+++ b/igibson/robots/behavior_robot.py
@@ -21,31 +21,30 @@
import os
from collections import OrderedDict
-import gym
import numpy as np
import pybullet as p
from igibson import assets_path
-from igibson.external.pybullet_tools.utils import link_from_name, set_all_collisions
+from igibson.external.pybullet_tools.utils import set_all_collisions
+from igibson.object_states.factory import prepare_object_states
from igibson.objects.articulated_object import ArticulatedObject
from igibson.objects.visual_marker import VisualMarker
-from igibson.utils.constants import SemanticClass, SimulatorMode
from igibson.utils.mesh_util import quat2rotmat, xyzw2wxyz
# Helps eliminate effect of numerical error on distance threshold calculations, especially when part is at the threshold
THRESHOLD_EPSILON = 0.001
# Part offset parameters
-NECK_BASE_REL_POS_UNTRACKED = [-0.15, 0, -0.15]
-RIGHT_SHOULDER_REL_POS_UNTRACKED = [-0.15, -0.15, -0.15]
-LEFT_SHOULDER_REL_POS_UNTRACKED = [-0.15, 0.15, -0.15]
+NECK_BASE_REL_POS_UNTRACKED = [-0.15, 0, 0.3]
+RIGHT_SHOULDER_REL_POS_UNTRACKED = [-0.15, -0.15, 0.3]
+LEFT_SHOULDER_REL_POS_UNTRACKED = [-0.15, 0.15, 0.3]
EYE_LOC_POSE_UNTRACKED = ([0.05, 0, 0], [0, 0, 0, 1])
RIGHT_HAND_LOC_POSE_UNTRACKED = ([0.1, -0.12, -0.4], [-0.7, 0.7, 0.0, 0.15])
LEFT_HAND_LOC_POSE_UNTRACKED = ([0.1, 0.12, -0.4], [0.7, 0.7, 0.0, 0.15])
-NECK_BASE_REL_POS_TRACKED = [-0.15, 0, 0.3]
-RIGHT_SHOULDER_REL_POS_TRACKED = [-0.15, -0.15, 0.3]
-LEFT_SHOULDER_REL_POS_TRACKED = [-0.15, 0.15, 0.3]
+NECK_BASE_REL_POS_TRACKED = [-0.15, 0, -0.15]
+RIGHT_SHOULDER_REL_POS_TRACKED = [-0.15, -0.15, -0.15]
+LEFT_SHOULDER_REL_POS_TRACKED = [-0.15, 0.15, -0.15]
EYE_LOC_POSE_TRACKED = ([0.05, 0, 0.4], [0, 0, 0, 1])
RIGHT_HAND_LOC_POSE_TRACKED = ([0.1, -0.12, 0.05], [-0.7, 0.7, 0.0, 0.15])
LEFT_HAND_LOC_POSE_TRACKED = ([0.1, 0.12, 0.05], [0.7, 0.7, 0.0, 0.15])
@@ -76,7 +75,6 @@
HAND_LIFTING_FORCE = 300
# Assisted grasping parameters
-VISUALIZE_RAYS = False
ASSIST_FRACTION = 1.0
ARTICULATED_ASSIST_FRACTION = 0.7
MIN_ASSIST_FORCE = 0
@@ -86,9 +84,10 @@
CONSTRAINT_VIOLATION_THRESHOLD = 0.1
# Hand link index constants
-PALM_LINK_NAME = "palm"
-FINGER_TIP_LINK_NAMES = frozenset(["Tmiddle", "Imiddle", "Mmiddle", "Rmiddle", "Pmiddle"])
-THUMB_LINK_NAME = "Tmiddle"
+PALM_LINK_INDEX = 0
+FINGER_TIP_LINK_INDICES = [1, 2, 3, 4, 5]
+THUMB_LINK_INDEX = 4
+NON_THUMB_FINGERS = [1, 2, 3, 5]
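+# NOTE: these hard-coded indices are specific to the bundled vr_hand URDF (palm = link 0, fingertip links = 1-5, thumb tip = 4)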
# Gripper parameters
GRIPPER_GHOST_HAND_APPEAR_THRESHOLD = 0.25
@@ -115,11 +114,10 @@ def __init__(
hands=("left", "right"),
use_body=True,
use_gripper=False,
- use_ghost_hands=True,
+ use_ghost_hands=False,
normal_color=True,
show_visual_head=False,
- use_tracked_body=True,
- **kwargs
+ use_tracked_body_override=None,
):
"""
Initializes BehaviorRobot:
@@ -131,23 +129,24 @@ def __init__(
:param use_gripper: whether the agent should use the pybullet gripper or the iGibson VR hand
:param normal_color: whether to use normal color (grey) (when True) or alternative color (blue-tinted). The alternative
:param show_visual_head: whether to render a head cone where the BREye is
- :param use_tracked_body: sets use_tracked_body to decide on which URDF to load, and which local transforms to use
+ :param use_tracked_body_override: sets use_tracked_body no matter what is set in the VR settings. Can be
+ used to initialize a BehaviorRobot in a robotics environment.
"""
# Basic parameters
self.simulator = sim
self.robot_num = robot_num
self.hands = hands
self.use_body = use_body
- self.use_tracked_body = use_tracked_body
- if sim.mode == SimulatorMode.VR:
+ if use_tracked_body_override is None:
self.use_tracked_body = self.simulator.vr_settings.using_tracked_body
+ else:
+ self.use_tracked_body = use_tracked_body_override
self.use_gripper = use_gripper
self.use_ghost_hands = use_ghost_hands
self.normal_color = normal_color
self.show_visual_head = show_visual_head
+ self.action = np.zeros((28,))
self.action_dim = 28
- self.action = np.zeros((self.action_dim,))
- self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
# Activation parameters
self.activated = False
@@ -159,37 +158,30 @@ def __init__(
}
# Set up body parts
- self.links = dict()
+ self.parts = dict()
if "left" in self.hands:
- self.links["left_hand"] = (
- BRHand(self, hand="left", **kwargs) if not use_gripper else BRGripper(self, hand="left", **kwargs)
- )
+ self.parts["left_hand"] = BRHand(self, hand="left") if not use_gripper else BRGripper(self, hand="left")
if "right" in self.hands:
- self.links["right_hand"] = (
- BRHand(self, hand="right", **kwargs) if not use_gripper else BRGripper(self, hand="right", **kwargs)
- )
+ self.parts["right_hand"] = BRHand(self, hand="right") if not use_gripper else BRGripper(self, hand="right")
# Store reference between hands
if "left" in self.hands and "right" in self.hands:
- self.links["left_hand"].set_other_hand(self.links["right_hand"])
- self.links["right_hand"].set_other_hand(self.links["left_hand"])
+ self.parts["left_hand"].set_other_hand(self.parts["right_hand"])
+ self.parts["right_hand"].set_other_hand(self.parts["left_hand"])
if self.use_body:
- self.links["body"] = BRBody(self, **kwargs)
-
- self.links["eye"] = BREye(self, **kwargs)
+ self.parts["body"] = BRBody(self)
- def load(self, simulator):
- return [id for part in self.links.values() for id in part.load(simulator)]
+ self.parts["eye"] = BREye(self)
def set_colliders(self, enabled=False):
- self.links["left_hand"].set_colliders(enabled)
- self.links["right_hand"].set_colliders(enabled)
- self.links["body"].set_colliders(enabled)
+ self.parts["left_hand"].set_colliders(enabled)
+ self.parts["right_hand"].set_colliders(enabled)
+ self.parts["body"].set_colliders(enabled)
def set_position_orientation(self, pos, orn):
- self.links["body"].set_position_orientation_unwrapped(pos, orn)
- self.links["body"].new_pos, self.links["body"].new_orn = pos, orn
+ self.parts["body"].set_position_orientation_unwrapped(pos, orn)
+ self.parts["body"].new_pos, self.parts["body"].new_orn = pos, orn
# Local transforms for hands and eye
if self.use_tracked_body:
@@ -202,50 +194,42 @@ def set_position_orientation(self, pos, orn):
eye_loc_pose = EYE_LOC_POSE_UNTRACKED
left_hand_pos, left_hand_orn = p.multiplyTransforms(pos, orn, left_hand_loc_pose[0], left_hand_loc_pose[1])
- self.links["left_hand"].set_position_orientation(left_hand_pos, left_hand_orn)
+ self.parts["left_hand"].set_position_orientation(left_hand_pos, left_hand_orn)
right_hand_pos, right_hand_orn = p.multiplyTransforms(pos, orn, right_hand_loc_pose[0], right_hand_loc_pose[1])
- self.links["right_hand"].set_position_orientation(right_hand_pos, right_hand_orn)
+ self.parts["right_hand"].set_position_orientation(right_hand_pos, right_hand_orn)
eye_pos, eye_orn = p.multiplyTransforms(pos, orn, eye_loc_pose[0], eye_loc_pose[1])
- self.links["eye"].set_position_orientation(eye_pos, eye_orn)
+ self.parts["eye"].set_position_orientation(eye_pos, eye_orn)
for constraint, activated in self.constraints_active.items():
if not activated and constraint != "body":
- self.links[constraint].activate_constraints()
+ self.parts[constraint].activate_constraints()
self.constraints_active[constraint] = True
- left_pos, left_orn = self.links["left_hand"].get_position_orientation()
- right_pos, right_orn = self.links["right_hand"].get_position_orientation()
+ left_pos, left_orn = self.parts["left_hand"].get_position_orientation()
+ right_pos, right_orn = self.parts["right_hand"].get_position_orientation()
- self.links["left_hand"].move(left_pos, left_orn)
- self.links["right_hand"].move(right_pos, right_orn)
- if self.constraints_active["body"]:
- self.links["body"].move(pos, orn)
-
- def set_position(self, pos):
- self.set_position_orientation(pos, self.get_orientation())
-
- def set_orientation(self, orn):
- self.set_position_orientation(self.get_position(), orn)
+ self.parts["left_hand"].move(left_pos, left_orn)
+ self.parts["right_hand"].move(right_pos, right_orn)
def get_position(self):
- return self.links["body"].get_position()
+ return self.parts["body"].get_position()
def get_rpy(self):
return p.getEulerFromQuaternion(self.get_orientation())
def get_orientation(self):
- return self.links["body"].get_orientation()
+ return self.parts["body"].get_orientation()
def get_linear_velocity(self):
- (vx, vy, vz), _ = p.getBaseVelocity(self.links["body"].get_body_id())
+ (vx, vy, vz), _ = p.getBaseVelocity(self.parts["body"].body_id)
return np.array([vx, vy, vz])
def get_angular_velocity(self):
- _, (vr, vp, vyaw) = p.getBaseVelocity(self.links["body"].get_body_id())
+ _, (vr, vp, vyaw) = p.getBaseVelocity(self.parts["body"].body_id)
return np.array([vr, vp, vyaw])
def get_end_effector_position(self):
- return self.links["right_hand"].get_position()
+ return self.parts["right_hand"].get_position()
def dump_action(self):
"""
@@ -257,15 +241,15 @@ def activate(self):
"""
Activate BehaviorRobot and all its body parts.
This bypasses the activate mechanism used in VR with the trigger press
- This is useful for non-VR setting, e.g. iGibsonEnv
+ This is useful for non-VR settings, e.g. BehaviorEnv
"""
self.first_frame = False
self.activated = True
for part_name in self.constraints_active:
self.constraints_active[part_name] = True
- self.links[part_name].activated = True
- if self.links[part_name].movement_cid is None:
- self.links[part_name].activate_constraints()
+ self.parts[part_name].activated = True
+ if self.parts[part_name].movement_cid is None:
+ self.parts[part_name].activate_constraints()
def apply_action(self, action):
"""
@@ -283,8 +267,8 @@ def apply_action(self, action):
# Either trigger press will activate robot, and teleport the user to the robot if they are using VR
if action[19] > 0 or action[27] > 0:
self.activated = True
- if self.simulator.mode == SimulatorMode.VR:
- body_pos = self.links["body"].get_position()
+ if self.simulator.can_access_vr_context:
+ body_pos = self.parts["body"].get_position()
self.simulator.set_vr_pos(pos=(body_pos[0], body_pos[1], 0), keep_height=True)
else:
frame_action = action
@@ -293,28 +277,27 @@ def apply_action(self, action):
# Disable colliders
self.set_colliders(enabled=False)
# Move user close to the body to start with
- if self.simulator.mode == SimulatorMode.VR:
- body_pos = self.links["body"].get_position()
+ if self.simulator.can_access_vr_context:
+ body_pos = self.parts["body"].get_position()
self.simulator.set_vr_pos(pos=(body_pos[0], body_pos[1], 0), keep_height=True)
# Body constraint is the last one we need to activate
- self.links["body"].activate_constraints()
+ self.parts["body"].activate_constraints()
self.first_frame = False
# Must update the body before the other VR objects, since they
# rely on its transform to calculate their own transforms,
# as an action only contains local transforms relative to the body
- self.links["body"].update(frame_action)
+ self.parts["body"].update(frame_action)
for vr_obj_name in ["left_hand", "right_hand", "eye"]:
- self.links[vr_obj_name].update(frame_action)
+ self.parts[vr_obj_name].update(frame_action)
def render_camera_image(self, modes=("rgb")):
# render frames from current eye position
- eye_pos, eye_orn = self.links["eye"].get_position_orientation()
+ eye_pos, eye_orn = self.parts["eye"].get_position_orientation()
renderer = self.simulator.renderer
mat = quat2rotmat(xyzw2wxyz(eye_orn))[:3, :3]
view_direction = mat.dot(np.array([1, 0, 0]))
- up_direction = mat.dot(np.array([0, 0, 1]))
- renderer.set_camera(eye_pos, eye_pos + view_direction, up_direction, cache=True)
+ renderer.set_camera(eye_pos, eye_pos + view_direction, [0, 0, 1], cache=True)
frames = []
for item in renderer.render(modes=modes):
frames.append(item)
@@ -326,40 +309,26 @@ def _print_positions(self):
"""
print("Data for BehaviorRobot number {}".format(self.robot_num))
print("Using hands: {}, using body: {}, using gripper: {}".format(self.hands, self.use_body, self.use_gripper))
- for k, v in self.links.items():
+ for k, v in self.parts.items():
print("{} at position {}".format(k, v.get_position()))
print("-------------------------------")
- def reset(self):
+ def robot_specific_reset(self):
pass
- @property
- def proprioception_dim(self):
- return 6 * 3 + 4
+ def get_proprioception_dim(self):
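+ # 3 tracked parts (left hand, right hand, eye) x (3D position + 3 Euler angles) + 2 trigger fractions = 20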
+ return 6 * 3 + 2
def get_proprioception(self):
state = OrderedDict()
-
- lh_local_pos, lh_local_orn = self.links["left_hand"].get_local_position_orientation()
- state["left_hand_position_local"] = lh_local_pos
- state["left_hand_orientation_local"] = p.getEulerFromQuaternion(lh_local_orn)
-
- rh_local_pos, rh_local_orn = self.links["right_hand"].get_local_position_orientation()
- state["right_hand_position_local"] = rh_local_pos
- state["right_hand_orientation_local"] = p.getEulerFromQuaternion(rh_local_orn)
-
- eye_local_pos, eye_local_orn = self.links["right_hand"].get_local_position_orientation()
- state["eye_position_local"] = eye_local_pos
- state["eye_orientation_local"] = p.getEulerFromQuaternion(eye_local_orn)
-
- state["left_hand_trigger_fraction"] = self.links["left_hand"].trigger_fraction
- state["left_hand_is_grasping"] = float(
- self.links["left_hand"].object_in_hand is not None and self.links["left_hand"].release_counter is None
- )
- state["right_hand_trigger_fraction"] = self.links["right_hand"].trigger_fraction
- state["right_hand_is_grasping"] = float(
- self.links["right_hand"].object_in_hand is not None and self.links["right_hand"].release_counter is None
- )
+ state["left_hand_position_local"] = self.parts["left_hand"].local_pos
+ state["left_hand_orientation_local"] = p.getEulerFromQuaternion(self.parts["left_hand"].local_orn)
+ state["right_hand_position_local"] = self.parts["right_hand"].local_pos
+ state["right_hand_orientation_local"] = p.getEulerFromQuaternion(self.parts["right_hand"].local_orn)
+ state["eye_position_local"] = self.parts["eye"].local_pos
+ state["eye_orientation_local"] = p.getEulerFromQuaternion(self.parts["eye"].local_orn)
+ state["left_hand_trigger_fraction"] = self.parts["left_hand"].trigger_fraction
+ state["right_hand_trigger_fraction"] = self.parts["right_hand"].trigger_fraction
state_list = []
for k, v in state.items():
@@ -376,35 +345,39 @@ def get_proprioception(self):
return state_list
- def is_grasping(self, candidate_obj):
+ def is_grasping(self, candidate_obj_bid):
+ # NOTE: we need to check against the candidate object's body id rather than the
+ # object instance itself, because assisted grasping stores the grasped object's
+ # body id (not the object) in parts[...].object_in_hand
return np.array(
[
- self.links["left_hand"].object_in_hand == candidate_obj,
- self.links["right_hand"].object_in_hand == candidate_obj,
+ self.parts["left_hand"].object_in_hand == candidate_obj_bid,
+ self.parts["right_hand"].object_in_hand == candidate_obj_bid,
]
)
def can_toggle(self, toggle_position, toggle_distance_threshold):
- for part_name, part in self.links.items():
+ for part_name, part in self.parts.items():
if part_name in ["left_hand", "right_hand"]:
if (
np.linalg.norm(np.array(part.get_position()) - np.array(toggle_position))
< toggle_distance_threshold
):
return True
- for finger in part.finger_tip_link_indices:
- finger_link_state = p.getLinkState(part.get_body_id(), finger)
+ for finger in FINGER_TIP_LINK_INDICES:
+ finger_link_state = p.getLinkState(part.body_id, finger)
link_pos = finger_link_state[0]
if np.linalg.norm(np.array(link_pos) - np.array(toggle_position)) < toggle_distance_threshold:
return True
return False
def dump_state(self):
- return {part_name: part.dump_part_state() for part_name, part in self.links.items()}
+ return {part_name: part.dump_part_state() for part_name, part in self.parts.items()}
def load_state(self, dump):
for part_name, part_state in dump.items():
- self.links[part_name].load_part_state(part_state)
+ self.parts[part_name].load_part_state(part_state)
class BRBody(ArticulatedObject):
@@ -412,12 +385,7 @@ class BRBody(ArticulatedObject):
A simple ellipsoid representing the robot's body.
"""
- DEFAULT_RENDERING_PARAMS = {
- "use_pbr": False,
- "use_pbr_mapping": False,
- }
-
- def __init__(self, parent, class_id=SemanticClass.ROBOTS, **kwargs):
+ def __init__(self, parent):
# Set up class
self.parent = parent
self.name = "BRBody_{}".format(self.parent.robot_num)
@@ -432,11 +400,11 @@ def __init__(self, parent, class_id=SemanticClass.ROBOTS, **kwargs):
body_path = "normal_color" if self.parent.normal_color else "alternative_color"
body_path_suffix = "vr_body.urdf" if not self.parent.use_tracked_body else "vr_body_tracker.urdf"
self.vr_body_fpath = os.path.join(assets_path, "models", "vr_agent", "vr_body", body_path, body_path_suffix)
- super(BRBody, self).__init__(
- filename=self.vr_body_fpath, scale=1, abilities={"robot": {}}, class_id=class_id, **kwargs
- )
+ super(BRBody, self).__init__(filename=self.vr_body_fpath, scale=1)
+
+ prepare_object_states(self, {"robot": {}})
- def _load(self, simulator):
+ def _load(self):
"""
Overridden load that keeps BRBody awake upon initialization.
"""
@@ -448,30 +416,19 @@ def _load(self, simulator):
# The actual body is at link 0, the base link is a "virtual" link
p.changeDynamics(body_id, 0, mass=self.mass)
p.changeDynamics(body_id, -1, mass=1e-9)
+ self.create_link_name_to_vm_map(body_id)
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- return [body_id]
+ return body_id
def set_position_orientation_unwrapped(self, pos, orn):
super(BRBody, self).set_position_orientation(pos, orn)
- def set_position_unwrapped(self, pos):
- """Set object position in the format of Array[x, y, z]"""
- old_orn = self.get_orientation()
- self.set_position_orientation_unwrapped(pos, old_orn)
-
- def set_orientation_unwrapped(self, orn):
- """Set object orientation as a quaternion in the format of Array[x, y, z, w]"""
- old_pos = self.get_position()
- self.set_position_orientation_unwrapped(old_pos, orn)
-
def set_position_orientation(self, pos, orn):
self.parent.set_position_orientation(pos, orn)
def set_colliders(self, enabled=False):
assert type(enabled) == bool
- set_all_collisions(self.get_body_id(), int(enabled))
+ set_all_collisions(self.body_id, int(enabled))
if enabled == True:
self.set_body_collision_filters()
@@ -487,7 +444,7 @@ def activate_constraints(self):
)
self.movement_cid = p.createConstraint(
- self.get_body_id(),
+ self.body_id,
-1,
-1,
-1,
@@ -504,20 +461,14 @@ def set_body_collision_filters(self):
Sets BRBody's collision filters.
"""
# Get body ids of the floor and carpets
- no_col_objs = []
- if "floors" in self.parent.simulator.scene.objects_by_category:
- no_col_objs += self.parent.simulator.scene.objects_by_category["floors"]
- if "carpet" in self.parent.simulator.scene.objects_by_category:
- no_col_objs += self.parent.simulator.scene.objects_by_category["carpet"]
-
- no_col_ids = [x.get_body_id() for x in no_col_objs]
- body_link_idxs = [-1] + [i for i in range(p.getNumJoints(self.get_body_id()))]
+ no_col_ids = self.parent.simulator.get_category_ids("floors") + self.parent.simulator.get_category_ids("carpet")
+ body_link_idxs = [-1] + [i for i in range(p.getNumJoints(self.body_id))]
for col_id in no_col_ids:
col_link_idxs = [-1] + [i for i in range(p.getNumJoints(col_id))]
for body_link_idx in body_link_idxs:
for col_link_idx in col_link_idxs:
- p.setCollisionFilterPair(self.get_body_id(), col_id, body_link_idx, col_link_idx, 0)
+ p.setCollisionFilterPair(self.body_id, col_id, body_link_idx, col_link_idx, 0)
def move(self, pos, orn):
p.changeConstraint(self.movement_cid, pos, orn, maxForce=BODY_MOVING_FORCE)
@@ -540,14 +491,19 @@ def update(self, action):
delta_pos = action[:3]
delta_orn = action[3:6]
clipped_delta_pos, clipped_delta_orn = self.clip_delta_pos_orn(delta_pos, delta_orn)
+
# Convert orientation to a quaternion
clipped_delta_orn = p.getQuaternionFromEuler(clipped_delta_orn)
# Calculate new body transform
old_pos, old_orn = self.get_position_orientation()
self.new_pos, self.new_orn = p.multiplyTransforms(old_pos, old_orn, clipped_delta_pos, clipped_delta_orn)
- self.new_pos = np.round(self.new_pos, 5).tolist()
- self.new_orn = np.round(self.new_orn, 5).tolist()
+
+ # self.new_pos = np.round(self.new_pos, 5).tolist()
+ # self.new_orn = np.round(self.new_orn, 5).tolist()
+
+ self.new_pos = list(self.new_pos)
+ self.new_orn = list(self.new_orn)
# Reset agent activates the body and its collision filters
reset_agent = action[19] > 0 or action[27] > 0
@@ -555,8 +511,8 @@ def update(self, action):
if not self.activated:
self.set_colliders(enabled=True)
self.activated = True
- self.set_position_unwrapped(self.new_pos)
- self.set_orientation_unwrapped(self.new_orn)
+ self.set_position(self.new_pos)
+ self.set_orientation(self.new_orn)
self.move(self.new_pos, self.new_orn)
@@ -573,11 +529,6 @@ class BRHandBase(ArticulatedObject):
that subclasses override most of the methods to implement their own functionality.
"""
- DEFAULT_RENDERING_PARAMS = {
- "use_pbr": False,
- "use_pbr_mapping": False,
- }
-
def __init__(
self,
parent,
@@ -585,8 +536,6 @@ def __init__(
hand="right",
base_rot=(0, 0, 0, 1),
ghost_hand_appear_threshold=HAND_GHOST_HAND_APPEAR_THRESHOLD,
- class_id=SemanticClass.ROBOTS,
- **kwargs
):
"""
Initializes BRHandBase.
@@ -606,6 +555,7 @@ def __init__(
# This base rotation is applied before any actual rotation is applied to the hand. This adjusts
# for the hand model's rotation to make it appear in the right place.
self.base_rot = base_rot
+ self.local_pos, self.local_orn = [0, 0, 0], [0, 0, 0, 1]
self.trigger_fraction = 0
# Bool indicating whether the hands have been spawned by pressing the trigger reset
@@ -624,29 +574,22 @@ def __init__(
assets_path, "models", "vr_agent", "vr_hand", "ghost_hand_{}.obj".format(self.hand)
),
scale=[0.001] * 3,
- class_id=class_id,
)
self.ghost_hand.category = "agent"
self.ghost_hand_appear_threshold = ghost_hand_appear_threshold
if self.hand not in ["left", "right"]:
raise ValueError("ERROR: BRHandBase can only accept left or right as a hand argument!")
- super(BRHandBase, self).__init__(filename=self.fpath, scale=1, class_id=class_id, **kwargs)
+ super(BRHandBase, self).__init__(filename=self.fpath, scale=1)
- def _load(self, simulator):
+ def _load(self):
"""
Overridden load that keeps BRHandBase awake upon initialization.
"""
body_id = p.loadURDF(self.fpath, globalScaling=self.scale, flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL)
self.mass = p.getDynamicsInfo(body_id, -1)[0]
-
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- body_ids = [body_id]
- if self.parent.use_ghost_hands:
- body_ids.extend(self.ghost_hand.load(simulator))
-
- return body_ids
+ self.create_link_name_to_vm_map(body_id)
+ return body_id
def set_other_hand(self, other_hand):
"""
@@ -659,13 +602,21 @@ def activate_constraints(self):
# Start ghost hand where the VR hand starts
if self.parent.use_ghost_hands:
self.ghost_hand.set_position(self.get_position())
- p.changeVisualShape(self.ghost_hand.get_body_id(), -1, rgbaColor=(0, 0, 0, 0))
+ p.changeVisualShape(self.ghost_hand.body_id, -1, rgbaColor=(0, 0, 0, 0))
# change it to transparent for visualization
def set_position_orientation(self, pos, orn):
# set position and orientation of BRobot body part and update
# local transforms, note this function gets around state bound
super(BRHandBase, self).set_position_orientation(pos, orn)
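+ # Cache this part's pose relative to the body frame, using the body's pending (new_pos/new_orn) pose
+ # when available so the local transform matches the body pose that will be applied this frame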
+ body = self.parent.parts["body"]
+ if body.new_pos is None:
+ inv_body_pos, inv_body_orn = p.invertTransform(*body.get_position_orientation())
+ else:
+ inv_body_pos, inv_body_orn = p.invertTransform(body.new_pos, body.new_orn)
+ new_local_pos, new_local_orn = p.multiplyTransforms(inv_body_pos, inv_body_orn, pos, orn)
+ self.local_pos = new_local_pos
+ self.local_orn = new_local_orn
self.new_pos = pos
self.new_orn = orn
# Update pos and orientation of ghost hands as well
@@ -673,12 +624,6 @@ def set_position_orientation(self, pos, orn):
self.ghost_hand.set_position(self.new_pos)
self.ghost_hand.set_orientation(self.new_orn)
- def get_local_position_orientation(self):
- body = self.parent.links["body"]
- return p.multiplyTransforms(
- *p.invertTransform(*body.get_position_orientation()), *self.get_position_orientation()
- )
-
def set_position(self, pos):
self.set_position_orientation(pos, self.get_orientation())
@@ -687,7 +632,7 @@ def set_orientation(self, orn):
def set_colliders(self, enabled=False):
assert type(enabled) == bool
- set_all_collisions(self.get_body_id(), int(enabled))
+ set_all_collisions(self.body_id, int(enabled))
def clip_delta_pos_orn(self, delta_pos, delta_orn):
"""
@@ -701,7 +646,7 @@ def clip_delta_pos_orn(self, delta_pos, delta_orn):
clipped_delta_orn = clipped_delta_orn.tolist()
# Constraint position so hand doesn't go further than hand_thresh from corresponding shoulder
- if self.parent.use_tracked_body:
+ if not self.parent.use_tracked_body:
left_shoulder_rel_pos = LEFT_SHOULDER_REL_POS_TRACKED
right_shoulder_rel_pos = RIGHT_SHOULDER_REL_POS_TRACKED
else:
@@ -710,8 +655,7 @@ def clip_delta_pos_orn(self, delta_pos, delta_orn):
shoulder_point = left_shoulder_rel_pos if self.hand == "left" else right_shoulder_rel_pos
shoulder_point = np.array(shoulder_point)
- current_local_pos = np.array(self.get_local_position_orientation()[0])
- desired_local_pos = current_local_pos + np.array(clipped_delta_pos)
+ desired_local_pos = np.array(self.local_pos) + np.array(clipped_delta_pos)
shoulder_to_hand = desired_local_pos - shoulder_point
dist_to_shoulder = np.linalg.norm(shoulder_to_hand)
if dist_to_shoulder > (HAND_DISTANCE_THRESHOLD + THRESHOLD_EPSILON):
@@ -722,7 +666,7 @@ def clip_delta_pos_orn(self, delta_pos, delta_orn):
# Add to shoulder position to get final local position
reduced_local_pos = shoulder_point + reduced_shoulder_to_hand
# Calculate new delta to get to this point
- clipped_delta_pos = reduced_local_pos - current_local_pos
+ clipped_delta_pos = reduced_local_pos - np.array(self.local_pos)
return clipped_delta_pos, clipped_delta_orn
@@ -743,16 +687,20 @@ def update(self, action):
clipped_delta_orn = p.getQuaternionFromEuler(clipped_delta_orn)
# Calculate new local transform
- old_local_pos, old_local_orn = self.get_local_position_orientation()
+ old_local_pos, old_local_orn = self.local_pos, self.local_orn
_, new_local_orn = p.multiplyTransforms([0, 0, 0], clipped_delta_orn, [0, 0, 0], old_local_orn)
new_local_pos = np.array(old_local_pos) + np.array(clipped_delta_pos)
+ self.local_pos = new_local_pos
+ self.local_orn = new_local_orn
+
# Calculate new world position based on local transform and new body pose
- body = self.parent.links["body"]
+ body = self.parent.parts["body"]
self.new_pos, self.new_orn = p.multiplyTransforms(body.new_pos, body.new_orn, new_local_pos, new_local_orn)
+
# Round to avoid numerical inaccuracies
- self.new_pos = np.round(self.new_pos, 5).tolist()
- self.new_orn = np.round(self.new_orn, 5).tolist()
+ # self.new_pos = np.round(self.new_pos, 5).tolist()
+ # self.new_orn = np.round(self.new_orn, 5).tolist()
# Reset agent activates the body and its collision filters
if self.hand == "left":
@@ -773,7 +721,7 @@ def update(self, action):
else:
delta_trig_frac = action[26]
- new_trig_frac = np.clip(self.trigger_fraction + delta_trig_frac, 0.0, 1.0)
+ new_trig_frac = self.trigger_fraction + delta_trig_frac
self.set_close_fraction(new_trig_frac)
self.trigger_fraction = new_trig_frac
@@ -816,10 +764,14 @@ def update_ghost_hands(self):
def dump_part_state(self):
return {
+ "local_pos": list(self.local_pos),
+ "local_orn": list(self.local_orn),
"trigger_fraction": self.trigger_fraction,
}
def load_part_state(self, dump):
+ self.local_pos = np.array(dump["local_pos"])
+ self.local_orn = np.array(dump["local_orn"])
self.trigger_fraction = dump["trigger_fraction"]
@@ -828,7 +780,7 @@ class BRHand(BRHandBase):
Represents the human hand used for VR programs and robotics applications.
"""
- def __init__(self, parent, hand="right", **kwargs):
+ def __init__(self, parent, hand="right"):
hand_path = "normal_color" if parent.normal_color else "alternative_color"
self.vr_hand_folder = os.path.join(assets_path, "models", "vr_agent", "vr_hand", hand_path)
final_suffix = "{}_{}.urdf".format("vr_hand_vhacd", hand)
@@ -839,7 +791,6 @@ def __init__(self, parent, hand="right", **kwargs):
hand=hand,
base_rot=base_rot_handed,
ghost_hand_appear_threshold=HAND_GHOST_HAND_APPEAR_THRESHOLD,
- **kwargs
)
# Variables for assisted grasping
@@ -854,25 +805,15 @@ def __init__(self, parent, hand="right", **kwargs):
self.candidate_data = None
self.movement_cid = None
- def load(self, simulator):
- ids = super(BRHand, self).load(simulator)
-
- self.palm_link_idx = link_from_name(self.get_body_id(), PALM_LINK_NAME)
- self.finger_tip_link_indices = {link_from_name(self.get_body_id(), name) for name in FINGER_TIP_LINK_NAMES}
- self.thumb_link_idx = link_from_name(self.get_body_id(), THUMB_LINK_NAME)
- self.non_thumb_fingers = self.finger_tip_link_indices - {self.thumb_link_idx}
-
- return ids
-
def activate_constraints(self):
- p.changeDynamics(self.get_body_id(), -1, mass=1, lateralFriction=HAND_FRICTION)
- for joint_index in range(p.getNumJoints(self.get_body_id())):
+ p.changeDynamics(self.body_id, -1, mass=1, lateralFriction=HAND_FRICTION)
+ for joint_index in range(p.getNumJoints(self.body_id)):
# Make masses larger for greater stability
# Mass is in kg, friction is coefficient
- p.changeDynamics(self.get_body_id(), joint_index, mass=0.1, lateralFriction=HAND_FRICTION)
- p.resetJointState(self.get_body_id(), joint_index, targetValue=0, targetVelocity=0.0)
+ p.changeDynamics(self.body_id, joint_index, mass=0.1, lateralFriction=HAND_FRICTION)
+ p.resetJointState(self.body_id, joint_index, targetValue=0, targetVelocity=0.0)
p.setJointMotorControl2(
- self.get_body_id(),
+ self.body_id,
joint_index,
controlMode=p.POSITION_CONTROL,
targetPosition=0,
@@ -881,10 +822,10 @@ def activate_constraints(self):
velocityGain=0.1,
force=0,
)
- p.setJointMotorControl2(self.get_body_id(), joint_index, controlMode=p.VELOCITY_CONTROL, targetVelocity=0.0)
+ p.setJointMotorControl2(self.body_id, joint_index, controlMode=p.VELOCITY_CONTROL, targetVelocity=0.0)
# Create constraint that can be used to move the hand
self.movement_cid = p.createConstraint(
- self.get_body_id(),
+ self.body_id,
-1,
-1,
-1,
@@ -904,20 +845,18 @@ def set_hand_coll_filter(self, target_id, enable):
:param enable: whether to enable/disable collisions
"""
target_link_idxs = [-1] + [i for i in range(p.getNumJoints(target_id))]
- body_link_idxs = [-1] + [i for i in range(p.getNumJoints(self.get_body_id()))]
+ body_link_idxs = [-1] + [i for i in range(p.getNumJoints(self.body_id))]
for body_link_idx in body_link_idxs:
for target_link_idx in target_link_idxs:
- p.setCollisionFilterPair(
- self.get_body_id(), target_id, body_link_idx, target_link_idx, 1 if enable else 0
- )
+ p.setCollisionFilterPair(self.body_id, target_id, body_link_idx, target_link_idx, 1 if enable else 0)
def gen_freeze_vals(self):
"""
Generate joint values to freeze joints at.
"""
- for joint_index in range(p.getNumJoints(self.get_body_id())):
- j_val = p.getJointState(self.get_body_id(), joint_index)[0]
+ for joint_index in range(p.getNumJoints(self.body_id)):
+ j_val = p.getJointState(self.body_id, joint_index)[0]
self.freeze_vals[joint_index] = j_val
def freeze_joints(self):
@@ -925,21 +864,22 @@ def freeze_joints(self):
Freezes hand joints - used in assisted grasping.
"""
for joint_index, j_val in self.freeze_vals.items():
- p.resetJointState(self.get_body_id(), joint_index, targetValue=j_val, targetVelocity=0.0)
+ p.resetJointState(self.body_id, joint_index, targetValue=j_val, targetVelocity=0.0)
def find_raycast_candidates(self):
"""
Calculates the body id and link that have the most fingertip-palm ray intersections.
"""
# Store unique ray start/end points for visualization
- palm_link_state = p.getLinkState(self.get_body_id(), 0)
+ raypoints = []
+ palm_link_state = p.getLinkState(self.body_id, 0)
palm_pos = palm_link_state[0]
palm_orn = palm_link_state[1]
palm_base_pos, _ = p.multiplyTransforms(palm_pos, palm_orn, PALM_BASE_POS, [0, 0, 0, 1])
palm_center_pos = np.copy(PALM_CENTER_POS)
palm_center_pos[1] *= 1 if self.hand == "right" else -1
palm_center_pos, _ = p.multiplyTransforms(palm_pos, palm_orn, palm_center_pos, [0, 0, 0, 1])
- thumb_link_state = p.getLinkState(self.get_body_id(), self.thumb_link_idx)
+ thumb_link_state = p.getLinkState(self.body_id, THUMB_LINK_INDEX)
thumb_pos = thumb_link_state[0]
thumb_orn = thumb_link_state[1]
thumb_1_pos = np.copy(THUMB_1_POS)
@@ -949,11 +889,12 @@ def find_raycast_candidates(self):
thumb_1, _ = p.multiplyTransforms(thumb_pos, thumb_orn, thumb_2_pos, [0, 0, 0, 1])
thumb_2, _ = p.multiplyTransforms(thumb_pos, thumb_orn, thumb_1_pos, [0, 0, 0, 1])
# Repeat for each of 4 fingers
+ raypoints.extend([palm_base_pos, palm_center_pos, thumb_1, thumb_2])
raycast_startpoints = [palm_base_pos, palm_center_pos, thumb_1, thumb_2] * 4
raycast_endpoints = []
- for lk in self.non_thumb_fingers:
- finger_link_state = p.getLinkState(self.get_body_id(), lk)
+ for lk in NON_THUMB_FINGERS:
+ finger_link_state = p.getLinkState(self.body_id, lk)
link_pos = finger_link_state[0]
link_orn = finger_link_state[1]
@@ -961,13 +902,10 @@ def find_raycast_candidates(self):
finger_tip_pos[1] *= 1 if self.hand == "right" else -1
finger_tip_pos, _ = p.multiplyTransforms(link_pos, link_orn, finger_tip_pos, [0, 0, 0, 1])
+ raypoints.append(finger_tip_pos)
raycast_endpoints.extend([finger_tip_pos] * 4)
- if VISUALIZE_RAYS:
- for f, t in zip(raycast_startpoints, raycast_endpoints):
- p.addUserDebugLine(f, t, [1, 0, 0], 0.01)
-
- # Raycast from each start point to each end point - 16 in total.
+ # Raycast from each start point to each end point - 16 rays in total (4 palm/thumb start points x 4 fingertip end points)
ray_results = p.rayTestBatch(raycast_startpoints, raycast_endpoints)
if not ray_results:
return None
@@ -975,18 +913,18 @@ def find_raycast_candidates(self):
for ray_res in ray_results:
bid, link_idx, fraction, _, _ = ray_res
# Skip intersections with the hand itself
- if bid == -1 or bid == self.get_body_id():
+ if bid == -1 or bid == self.body_id:
continue
ray_data.append((bid, link_idx))
return ray_data
- def find_hand_contacts(self, find_all=False, return_contact_positions=False):
+ def find_hand_contacts(self, find_all=False):
"""
Calculates the body ids and links that have force applied to them by the VR hand.
"""
# Get collisions
- cpts = p.getContactPoints(self.get_body_id())
+ cpts = p.getContactPoints(self.body_id)
if not cpts:
return None
@@ -994,15 +932,11 @@ def find_hand_contacts(self, find_all=False, return_contact_positions=False):
for i in range(len(cpts)):
cpt = cpts[i]
# Don't attach to links that are not fingertips
- if (not find_all) and (cpt[3] not in self.finger_tip_link_indices):
+ if (not find_all) and (cpt[3] not in FINGER_TIP_LINK_INDICES):
continue
c_bid = cpt[2]
c_link = cpt[4]
- c_contact_pos = cpt[5]
- if return_contact_positions:
- contact_data.append((c_bid, c_link, c_contact_pos))
- else:
- contact_data.append((c_bid, c_link))
+ contact_data.append((c_bid, c_link))
return contact_data
@@ -1017,7 +951,7 @@ def calculate_ag_object(self):
return None
# Step 2 - find the closest object to the palm center among these "inside" objects
- palm_state = p.getLinkState(self.get_body_id(), 0)
+ palm_state = p.getLinkState(self.body_id, 0)
palm_center_pos = np.copy(PALM_CENTER_POS)
palm_center_pos[1] *= 1 if self.hand == "right" else -1
palm_center_pos, _ = p.multiplyTransforms(palm_state[0], palm_state[1], palm_center_pos, [0, 0, 0, 1])
@@ -1038,7 +972,8 @@ def calculate_ag_object(self):
return None
# Step 3 - Make sure we are applying a force to this object
- force_data = self.find_hand_contacts()
+ force_data = self.find_hand_contacts()
+
if not force_data or (ag_bid, ag_link) not in force_data:
return None
@@ -1046,14 +981,14 @@ def calculate_ag_object(self):
if (
not self.parent.simulator.can_assisted_grasp(ag_bid, ag_link)
or (self.other_hand and self.other_hand.object_in_hand == ag_bid)
- or ("body" in self.parent.links and self.parent.links["body"].get_body_id() == ag_bid)
- or (self.other_hand and self.other_hand.get_body_id() == ag_bid)
+ or ("body" in self.parent.parts and self.parent.parts["body"].body_id == ag_bid)
+ or (self.other_hand and self.other_hand.body_id == ag_bid)
):
return None
return ag_bid, ag_link
- def handle_assisted_grasping(self, action, override_ag_data=None):
+ def handle_assisted_grasping(self, action, override_ag_data=None, bypass_force_check=False):
"""
Handles assisted grasping.
:param action: numpy array of actions.
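+ :param override_ag_data: optional (body_id, link_index) tuple to grasp instead of the raycast-selected candidate
+ :param bypass_force_check: if True, skip the contact-force check; only valid together with override_ag_data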
@@ -1063,7 +998,7 @@ def handle_assisted_grasping(self, action, override_ag_data=None):
else:
delta_trig_frac = action[26]
- new_trig_frac = np.clip(self.trigger_fraction + delta_trig_frac, 0.0, 1.0)
+ new_trig_frac = self.trigger_fraction + delta_trig_frac
# Execute gradual release of object
if self.release_counter is not None:
@@ -1080,72 +1015,46 @@ def handle_assisted_grasping(self, action, override_ag_data=None):
if not self.object_in_hand:
# Detect valid trig fraction that is above threshold
- if new_trig_frac > TRIGGER_FRACTION_THRESHOLD:
+ if 0.0 <= new_trig_frac <= 1.0 and new_trig_frac > TRIGGER_FRACTION_THRESHOLD:
if override_ag_data is not None:
ag_data = override_ag_data
force_data = self.find_hand_contacts(find_all=True)
- # print(ag_data, force_data)
- # from IPython import embed; embed()
- if not force_data or ag_data not in force_data:
+ if not bypass_force_check and (not force_data or ag_data not in force_data):
return False
else:
+ if bypass_force_check:
+ print("override_ag_data must be given with bypass_force_check in handle_assisted_grasping")
+ return False
ag_data = self.calculate_ag_object()
# Return early if no AG-valid object can be grasped
if not ag_data:
return False
+
ag_bid, ag_link = ag_data
- # Create a p2p joint if it's a child link of a fixed URDF that is connected by a revolute or prismatic joint
- if (
- ag_link != -1
- and p.getJointInfo(ag_bid, ag_link)[2] in [p.JOINT_REVOLUTE, p.JOINT_PRISMATIC]
- and ag_bid in self.parent.simulator.scene.objects_by_id
- and hasattr(self.parent.simulator.scene.objects_by_id[ag_bid], "main_body_is_fixed")
- and self.parent.simulator.scene.objects_by_id[ag_bid].main_body_is_fixed
- ):
- joint_type = p.JOINT_POINT2POINT
- else:
- joint_type = p.JOINT_FIXED
+ child_frame_pos, child_frame_orn = self.get_child_frame_pose(ag_bid, ag_link)
- force_data = self.find_hand_contacts(return_contact_positions=True)
- contact_pos = None
- for c_bid, c_link, c_contact_pos in force_data:
- if (c_bid, c_link) == ag_data:
- contact_pos = c_contact_pos
- break
- assert contact_pos is not None
-
- # Joint frame set at the contact point
- joint_frame_pos = contact_pos
- joint_frame_orn = [0, 0, 0, 1]
- palm_link_pos, palm_link_orn = p.getLinkState(self.get_body_id(), self.palm_link_idx)[:2]
- inv_palm_link_pos, inv_palm_link_orn = p.invertTransform(palm_link_pos, palm_link_orn)
- parent_frame_pos, parent_frame_orn = p.multiplyTransforms(
- inv_palm_link_pos, inv_palm_link_orn, joint_frame_pos, joint_frame_orn
- )
+ # Use a fixed joint when grasping an object's base link (ag_link == -1), and a point-to-point joint when grasping a child link of an articulated object
if ag_link == -1:
- obj_pos, obj_orn = p.getBasePositionAndOrientation(ag_bid)
+ joint_type = p.JOINT_FIXED
else:
- obj_pos, obj_orn = p.getLinkState(ag_bid, ag_link)[:2]
- inv_obj_pos, inv_obj_orn = p.invertTransform(obj_pos, obj_orn)
- child_frame_pos, child_frame_orn = p.multiplyTransforms(
- inv_obj_pos, inv_obj_orn, joint_frame_pos, joint_frame_orn
- )
+ joint_type = p.JOINT_POINT2POINT
+
self.obj_cid = p.createConstraint(
- parentBodyUniqueId=self.get_body_id(),
- parentLinkIndex=self.palm_link_idx,
+ parentBodyUniqueId=self.body_id,
+ parentLinkIndex=PALM_LINK_INDEX,
childBodyUniqueId=ag_bid,
childLinkIndex=ag_link,
jointType=joint_type,
jointAxis=(0, 0, 0),
- parentFramePosition=parent_frame_pos,
+ parentFramePosition=(0, 0, 0),
childFramePosition=child_frame_pos,
- parentFrameOrientation=parent_frame_orn,
childFrameOrientation=child_frame_orn,
)
# Modify max force based on user-determined assist parameters
- if joint_type == p.JOINT_FIXED:
+ if ag_link == -1:
max_force = ASSIST_FORCE
else:
max_force = ASSIST_FORCE * ARTICULATED_ASSIST_FRACTION
@@ -1156,10 +1065,6 @@ def handle_assisted_grasping(self, action, override_ag_data=None):
"childLinkIndex": ag_link,
"jointType": joint_type,
"maxForce": max_force,
- "parentFramePosition": parent_frame_pos,
- "childFramePosition": child_frame_pos,
- "parentFrameOrientation": parent_frame_orn,
- "childFrameOrientation": child_frame_orn,
}
self.object_in_hand = ag_bid
self.should_freeze_joints = True
@@ -1167,9 +1072,18 @@ def handle_assisted_grasping(self, action, override_ag_data=None):
self.set_hand_coll_filter(ag_bid, False)
self.gen_freeze_vals()
return True
+
+ else:
+ return False
+
else:
constraint_violation = self.get_constraint_violation(self.obj_cid)
- if new_trig_frac <= TRIGGER_FRACTION_THRESHOLD or constraint_violation > CONSTRAINT_VIOLATION_THRESHOLD:
+ if (
+ 0.0 <= new_trig_frac <= 1.0 and new_trig_frac <= TRIGGER_FRACTION_THRESHOLD
+ ) or constraint_violation > CONSTRAINT_VIOLATION_THRESHOLD:
p.removeConstraint(self.obj_cid)
self.obj_cid = None
self.obj_cid_params = {}
@@ -1228,7 +1142,7 @@ def update(self, action):
"""
# AG is only enabled for the reduced joint hand
if ASSIST_FRACTION > 0:
- self.handle_assisted_grasping(action)
+ grasp_success = self.handle_assisted_grasping(action)
super(BRHand, self).update(action)
@@ -1245,22 +1159,56 @@ def set_close_fraction(self, close_frac):
if self.should_freeze_joints:
return
- for joint_index in range(p.getNumJoints(self.get_body_id())):
- jf = p.getJointInfo(self.get_body_id(), joint_index)
+ # Clip close fraction to make sure it stays within [0, 1] range
+ clipped_close_frac = np.clip([close_frac], 0, 1)[0]
+
+ for joint_index in range(p.getNumJoints(self.body_id)):
+ jf = p.getJointInfo(self.body_id, joint_index)
j_name = jf[1]
# Thumb has different close fraction to fingers
if j_name.decode("utf-8")[0] == "T":
close_pos = THUMB_CLOSE_POSITION
else:
close_pos = FINGER_CLOSE_POSITION
- interp_frac = (close_pos - HAND_OPEN_POSITION) * close_frac
+ interp_frac = (close_pos - HAND_OPEN_POSITION) * clipped_close_frac
target_pos = HAND_OPEN_POSITION + interp_frac
p.setJointMotorControl2(
- self.get_body_id(), joint_index, p.POSITION_CONTROL, targetPosition=target_pos, force=HAND_CLOSE_FORCE
+ self.body_id, joint_index, p.POSITION_CONTROL, targetPosition=target_pos, force=HAND_CLOSE_FORCE
)
+ def get_child_frame_pose(self, ag_bid, ag_link):
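+ # Returns the palm's pose expressed in the grasped body's (or link's) frame; used as the child frame of the grasp constraint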
+ # Different pos/orn calculations for base/links
+ if ag_link == -1:
+ body_pos, body_orn = p.getBasePositionAndOrientation(ag_bid)
+ else:
+ body_pos, body_orn = p.getLinkState(ag_bid, ag_link)[:2]
+
+ # Get inverse world transform of body frame
+ inv_body_pos, inv_body_orn = p.invertTransform(body_pos, body_orn)
+ link_state = p.getLinkState(self.body_id, PALM_LINK_INDEX)
+ link_pos = link_state[0]
+ link_orn = link_state[1]
+ # B * T = P  =>  T = B^-1 * P, where B is the body transform, T the target (child frame) transform, and P the palm transform
+ child_frame_pos, child_frame_orn = p.multiplyTransforms(inv_body_pos, inv_body_orn, link_pos, link_orn)
+
+ return child_frame_pos, child_frame_orn
+
def dump_part_state(self):
dump = super(BRHand, self).dump_part_state()
+
+ # Recompute child frame pose because it could have changed since the
+ # constraint has been created
+ if self.obj_cid is not None:
+ ag_bid = self.obj_cid_params["childBodyUniqueId"]
+ ag_link = self.obj_cid_params["childLinkIndex"]
+ child_frame_pos, child_frame_orn = self.get_child_frame_pose(ag_bid, ag_link)
+ self.obj_cid_params.update(
+ {
+ "childFramePosition": child_frame_pos,
+ "childFrameOrientation": child_frame_orn,
+ }
+ )
+
dump.update(
{
"object_in_hand": self.object_in_hand,
@@ -1292,15 +1240,14 @@ def load_part_state(self, dump):
self.obj_cid_params = dump["obj_cid_params"]
if self.obj_cid is not None:
self.obj_cid = p.createConstraint(
- parentBodyUniqueId=self.get_body_id(),
- parentLinkIndex=self.palm_link_idx,
+ parentBodyUniqueId=self.body_id,
+ parentLinkIndex=PALM_LINK_INDEX,
childBodyUniqueId=dump["obj_cid_params"]["childBodyUniqueId"],
childLinkIndex=dump["obj_cid_params"]["childLinkIndex"],
jointType=dump["obj_cid_params"]["jointType"],
jointAxis=(0, 0, 0),
- parentFramePosition=dump["obj_cid_params"]["parentFramePosition"],
+ parentFramePosition=(0, 0, 0),
childFramePosition=dump["obj_cid_params"]["childFramePosition"],
- parentFrameOrientation=dump["obj_cid_params"]["parentFrameOrientation"],
childFrameOrientation=dump["obj_cid_params"]["childFrameOrientation"],
)
p.changeConstraint(self.obj_cid, maxForce=dump["obj_cid_params"]["maxForce"])
@@ -1325,7 +1272,7 @@ class BRGripper(BRHandBase):
Gripper utilizing the pybullet gripper URDF.
"""
- def __init__(self, parent, hand="right", **kwargs):
+ def __init__(self, parent, hand="right"):
gripper_path = "normal_color" if self.parent.normal_color else "alternative_color"
vr_gripper_fpath = os.path.join(
assets_path, "models", "vr_agent", "vr_gripper", gripper_path, "vr_gripper.urdf"
@@ -1336,7 +1283,6 @@ def __init__(self, parent, hand="right", **kwargs):
hand=hand,
base_rot=p.getQuaternionFromEuler([0, 0, 0]),
ghost_hand_appear_threshold=GRIPPER_GHOST_HAND_APPEAR_THRESHOLD,
- **kwargs
)
def activate_constraints(self):
@@ -1350,19 +1296,19 @@ def activate_constraints(self):
)
)
- for joint_idx in range(p.getNumJoints(self.get_body_id())):
- p.resetJointState(self.get_body_id(), joint_idx, GRIPPER_JOINT_POSITIONS[joint_idx])
- p.setJointMotorControl2(self.get_body_id(), joint_idx, p.POSITION_CONTROL, targetPosition=0, force=0)
+ for joint_idx in range(p.getNumJoints(self.body_id)):
+ p.resetJointState(self.body_id, joint_idx, GRIPPER_JOINT_POSITIONS[joint_idx])
+ p.setJointMotorControl2(self.body_id, joint_idx, p.POSITION_CONTROL, targetPosition=0, force=0)
# Movement constraint
self.movement_cid = p.createConstraint(
- self.get_body_id(), -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0.2, 0, 0], self.get_position()
+ self.body_id, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0.2, 0, 0], self.get_position()
)
# Gripper gear constraint
self.grip_cid = p.createConstraint(
- self.get_body_id(),
+ self.body_id,
0,
- self.get_body_id(),
+ self.body_id,
2,
jointType=p.JOINT_GEAR,
jointAxis=[0, 1, 0],
@@ -1374,11 +1320,14 @@ def activate_constraints(self):
def set_close_fraction(self, close_frac):
# PyBullet recommends doing this to keep the gripper centered/symmetric
- b = p.getJointState(self.get_body_id(), 2)[0]
- p.setJointMotorControl2(self.get_body_id(), 0, p.POSITION_CONTROL, targetPosition=b, force=3)
+ b = p.getJointState(self.body_id, 2)[0]
+ p.setJointMotorControl2(self.body_id, 0, p.POSITION_CONTROL, targetPosition=b, force=3)
+
+ # Clip close fraction to make sure it stays within [0, 1] range
+ clipped_close_frac = np.clip([close_frac], 0, 1)[0]
# Change gear constraint to reflect trigger close fraction
- p.changeConstraint(self.grip_cid, gearRatio=1, erp=1, relativePositionTarget=1 - close_frac, maxForce=3)
+ p.changeConstraint(self.grip_cid, gearRatio=1, erp=1, relativePositionTarget=1 - clipped_close_frac, maxForce=3)
class BREye(ArticulatedObject):
@@ -1387,8 +1336,9 @@ class BREye(ArticulatedObject):
to move the camera and render the same thing that the VR users see.
"""
- def __init__(self, parent, class_id=SemanticClass.ROBOTS, **kwargs):
+ def __init__(self, parent):
# Set up class
+ self.local_pos, self.local_orn = [0, 0, 0], [0, 0, 0, 1]
self.parent = parent
self.name = "BREye_{}".format(self.parent.robot_num)
@@ -1399,38 +1349,25 @@ def __init__(self, parent, class_id=SemanticClass.ROBOTS, **kwargs):
color_folder = "normal_color" if self.parent.normal_color else "alternative_color"
self.head_visual_path = os.path.join(assets_path, "models", "vr_agent", "vr_eye", color_folder, "vr_head.obj")
self.eye_path = os.path.join(assets_path, "models", "vr_agent", "vr_eye", "vr_eye.urdf")
- super(BREye, self).__init__(filename=self.eye_path, scale=1, class_id=class_id, **kwargs)
+ super(BREye, self).__init__(filename=self.eye_path, scale=1)
self.should_hide = True
self.head_visual_marker = VisualMarker(
- visual_shape=p.GEOM_MESH, filename=self.head_visual_path, scale=[0.08] * 3, class_id=class_id
- )
- self.neck_cid = None
-
- def _load(self, simulator):
- flags = p.URDF_USE_MATERIAL_COLORS_FROM_MTL | p.URDF_ENABLE_SLEEPING
- body_id = p.loadURDF(self.filename, globalScaling=self.scale, flags=flags)
-
- # Set a minimal mass
- self.mass = 1e-9
- p.changeDynamics(body_id, -1, self.mass)
-
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- body_ids = [body_id] + self.head_visual_marker.load(simulator)
-
- return body_ids
-
- def get_local_position_orientation(self):
- body = self.parent.links["body"]
- return p.multiplyTransforms(
- *p.invertTransform(*body.get_position_orientation()), *self.get_position_orientation()
+ visual_shape=p.GEOM_MESH, filename=self.head_visual_path, scale=[0.08] * 3
)
def set_position_orientation(self, pos, orn):
# set position and orientation of BRobot body part and update
# local transforms, note this function gets around state bound
super(BREye, self).set_position_orientation(pos, orn)
+ body = self.parent.parts["body"]
+ if body.new_pos is None:
+ inv_body_pos, inv_body_orn = p.invertTransform(*body.get_position_orientation())
+ else:
+ inv_body_pos, inv_body_orn = p.invertTransform(body.new_pos, body.new_orn)
+ new_local_pos, new_local_orn = p.multiplyTransforms(inv_body_pos, inv_body_orn, pos, orn)
+ self.local_pos = new_local_pos
+ self.local_orn = new_local_orn
self.new_pos = pos
self.new_orn = orn
self.head_visual_marker.set_position_orientation(self.new_pos, self.new_orn)
@@ -1452,14 +1389,12 @@ def clip_delta_pos_orn(self, delta_pos, delta_orn):
clipped_delta_orn = np.clip(delta_orn, -HEAD_ANGULAR_VELOCITY, HEAD_ANGULAR_VELOCITY)
clipped_delta_orn = clipped_delta_orn.tolist()
- if self.parent.use_tracked_body:
+ if not self.parent.use_tracked_body:
neck_base_rel_pos = NECK_BASE_REL_POS_TRACKED
else:
neck_base_rel_pos = NECK_BASE_REL_POS_UNTRACKED
-
neck_base_point = np.array(neck_base_rel_pos)
- current_local_pos = np.array(self.get_local_position_orientation()[0])
- desired_local_pos = current_local_pos + np.array(clipped_delta_pos)
+ desired_local_pos = np.array(self.local_pos) + np.array(clipped_delta_pos)
neck_to_head = desired_local_pos - neck_base_point
dist_to_neck = np.linalg.norm(neck_to_head)
if dist_to_neck > (HEAD_DISTANCE_THRESHOLD + THRESHOLD_EPSILON):
@@ -1467,7 +1402,7 @@ def clip_delta_pos_orn(self, delta_pos, delta_orn):
shrink_factor = HEAD_DISTANCE_THRESHOLD / dist_to_neck
reduced_neck_to_head = neck_to_head * shrink_factor
reduced_local_pos = neck_base_point + reduced_neck_to_head
- clipped_delta_pos = reduced_local_pos - current_local_pos
+ clipped_delta_pos = reduced_local_pos - np.array(self.local_pos)
return clipped_delta_pos, clipped_delta_orn
@@ -1488,39 +1423,26 @@ def update(self, action):
clipped_delta_orn = p.getQuaternionFromEuler(clipped_delta_orn)
# Calculate new local transform
- current_local_pos, current_local_orn = self.get_local_position_orientation()
- _, new_local_orn = p.multiplyTransforms([0, 0, 0], clipped_delta_orn, [0, 0, 0], current_local_orn)
- new_local_pos = np.array(current_local_pos) + np.array(clipped_delta_pos)
-
- # Calculate new world position based on new local transform and current body pose
- body = self.parent.links["body"]
- self.new_pos, self.new_orn = p.multiplyTransforms(
- body.get_position(), body.get_orientation(), new_local_pos, new_local_orn
- )
+ old_local_pos, old_local_orn = self.local_pos, self.local_orn
+ _, new_local_orn = p.multiplyTransforms([0, 0, 0], clipped_delta_orn, [0, 0, 0], old_local_orn)
+ new_local_pos = np.array(old_local_pos) + np.array(clipped_delta_pos)
+
+ self.local_pos = new_local_pos
+ self.local_orn = new_local_orn
+
+ # Calculate new world position based on local transform and new body pose
+ body = self.parent.parts["body"]
+ self.new_pos, self.new_orn = p.multiplyTransforms(body.new_pos, body.new_orn, new_local_pos, new_local_orn)
self.new_pos = np.round(self.new_pos, 5).tolist()
self.new_orn = np.round(self.new_orn, 5).tolist()
self.set_position_orientation(self.new_pos, self.new_orn)
- if self.neck_cid is not None:
- p.removeConstraint(self.neck_cid)
-
- # Create a rigid constraint between the body and the head such that the head will move with the body during the
- # next physics simulation duration. Set the joint frame to be aligned with the child frame (URDF standard)
- self.neck_cid = p.createConstraint(
- parentBodyUniqueId=body.get_body_id(),
- parentLinkIndex=-1,
- childBodyUniqueId=self.get_body_id(),
- childLinkIndex=-1,
- jointType=p.JOINT_FIXED,
- jointAxis=[0, 0, 0],
- parentFramePosition=new_local_pos,
- childFramePosition=[0, 0, 0],
- parentFrameOrientation=new_local_orn,
- childFrameOrientation=[0, 0, 0, 1],
- )
-
def dump_part_state(self):
- pass
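+ # Persist the head's body-relative transform so a saved VR demo can restore it exactly.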
+ return {
+ "local_pos": list(self.local_pos),
+ "local_orn": list(self.local_orn),
+ }
def load_part_state(self, dump):
- pass
+ self.local_pos = np.array(dump["local_pos"])
+ self.local_orn = np.array(dump["local_orn"])
diff --git a/igibson/robots/fetch.py b/igibson/robots/fetch.py
deleted file mode 100644
index 1680af6b2..000000000
--- a/igibson/robots/fetch.py
+++ /dev/null
@@ -1,351 +0,0 @@
-import os
-
-import numpy as np
-import pybullet as p
-
-import igibson
-from igibson.controllers import ControlType
-from igibson.external.pybullet_tools.utils import set_joint_positions
-from igibson.robots.active_camera_robot import ActiveCameraRobot
-from igibson.robots.manipulation_robot import ManipulationRobot
-from igibson.robots.two_wheel_robot import TwoWheelRobot
-from igibson.utils.constants import SemanticClass
-from igibson.utils.python_utils import assert_valid_key
-
-DEFAULT_ARM_POSES = {
- "vertical",
- "diagonal15",
- "diagonal30",
- "diagonal45",
- "horizontal",
-}
-
-
-class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
- """
- Fetch Robot
- Reference: https://fetchrobotics.com/robotics-platforms/fetch-mobile-manipulator/
- """
-
- def __init__(
- self,
- control_freq=None,
- action_type="continuous",
- action_normalize=True,
- proprio_obs="default",
- controller_config=None,
- base_name=None,
- scale=1.0,
- self_collision=True,
- class_id=SemanticClass.ROBOTS,
- rendering_params=None,
- assisted_grasping_mode=None,
- rigid_trunk=False,
- default_trunk_offset=0.365,
- default_arm_pose="vertical",
- ):
- """
- :param control_freq: float, control frequency (in Hz) at which to control the robot. If set to be None,
- simulator.import_robot will automatically set the control frequency to be 1 / render_timestep by default.
- :param action_type: str, one of {discrete, continuous} - what type of action space to use
- :param action_normalize: bool, whether to normalize inputted actions. This will override any default values
- specified by this class.
- :param proprio_obs: str or tuple of str, proprioception observation key(s) to use for generating proprioceptive
- observations. If str, should be exactly "default" -- this results in the default proprioception observations
- being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict for valid key choices
- :param controller_config: None or Dict[str, ...], nested dictionary mapping controller name(s) to specific controller
- configurations for this robot. This will override any default values specified by this class.
- :param base_name: None or str, robot link name that will represent the entire robot's frame of reference. If not None,
- this should correspond to one of the link names found in this robot's corresponding URDF / MJCF file.
- None defaults to the first link name used in @model_file
- :param scale: int, scaling factor for model (default is 1)
- :param self_collision: bool, whether to enable self collision
- :param class_id: SemanticClass, semantic class this robot belongs to. Default is SemanticClass.ROBOTS.
- :param rendering_params: None or Dict[str, Any], If not None, should be keyword-mapped rendering options to set.
- See DEFAULT_RENDERING_PARAMS for the values passed by default.
- :param assisted_grasping_mode: None or str, One of {None, "soft", "strict"}. If None, no assisted grasping
- will be used. If "soft", will magnetize any object touching the gripper's fingers. If "strict" will require
- the object to be within the gripper bounding box before assisting.
- :param rigid_trunk: bool, if True, will prevent the trunk from moving during execution.
- :param default_trunk_offset: float, sets the default height of the robot's trunk
- :param default_arm_pose: Default pose for the robot arm. Should be one of {"vertical", "diagonal15",
- "diagonal30", "diagonal45", "horizontal"}
- """
- # Store args
- self.rigid_trunk = rigid_trunk
- self.default_trunk_offset = default_trunk_offset
- assert_valid_key(key=default_arm_pose, valid_keys=DEFAULT_ARM_POSES, name="default_arm_pose")
- self.default_arm_pose = default_arm_pose
-
- # Run super init
- super().__init__(
- control_freq=control_freq,
- action_type=action_type,
- action_normalize=action_normalize,
- controller_config=controller_config,
- base_name=base_name,
- scale=scale,
- self_collision=self_collision,
- class_id=class_id,
- rendering_params=rendering_params,
- assisted_grasping_mode=assisted_grasping_mode,
- )
-
- @property
- def tucked_default_joint_pos(self):
- return np.array(
- [
- 0.0,
- 0.0, # wheels
- 0.02, # trunk
- 0.0,
- 0.0, # head
- 1.1707963267948966,
- 1.4707963267948965,
- -0.4,
- 1.6707963267948966,
- 0.0,
- 1.5707963267948966,
- 0.0, # arm
- 0.05,
- 0.05, # gripper
- ]
- )
-
- @property
- def untucked_default_joint_pos(self):
- pos = np.zeros(self.n_joints)
- pos[self.base_control_idx] = 0.0
- pos[self.trunk_control_idx] = 0.02 + self.default_trunk_offset
- pos[self.camera_control_idx] = np.array([0.0, 0.45])
- pos[self.gripper_control_idx] = np.array([0.05, 0.05]) # open gripper
-
- # Choose arm based on setting
- if self.default_arm_pose == "vertical":
- pos[self.arm_control_idx] = np.array([-0.94121, -0.64134, 1.55186, 1.65672, -0.93218, 1.53416, 2.14474])
- elif self.default_arm_pose == "diagonal15":
- pos[self.arm_control_idx] = np.array([-0.95587, -0.34778, 1.46388, 1.47821, -0.93813, 1.4587, 1.9939])
- elif self.default_arm_pose == "diagonal30":
- pos[self.arm_control_idx] = np.array([-1.06595, -0.22184, 1.53448, 1.46076, -0.84995, 1.36904, 1.90996])
- elif self.default_arm_pose == "diagonal45":
- pos[self.arm_control_idx] = np.array([-1.11479, -0.0685, 1.5696, 1.37304, -0.74273, 1.3983, 1.79618])
- elif self.default_arm_pose == "horizontal":
- pos[self.arm_control_idx] = np.array([-1.43016, 0.20965, 1.86816, 1.77576, -0.27289, 1.31715, 2.01226])
- else:
- raise ValueError("Unknown default arm pose: {}".format(self.default_arm_pose))
- return pos
-
- def _create_discrete_action_space(self):
- # Fetch does not support discrete actions
- raise ValueError("Fetch does not support discrete actions!")
-
- def reset(self):
- # In addition to normal reset, reset the joint configuration to be in default untucked mode
- super().reset()
- joints = self.untucked_default_joint_pos
- set_joint_positions(self.get_body_id(), self.joint_ids, joints)
-
- def _load(self, simulator):
- # Run super method
- ids = super()._load(simulator)
-
- # Extend super method by increasing laterial friction for EEF
- for link in self.finger_joint_ids:
- p.changeDynamics(self.get_body_id(), link, lateralFriction=500)
-
- return ids
-
- def _actions_to_control(self, action):
- # Run super method first
- u_vec, u_type_vec = super()._actions_to_control(action=action)
-
- # Override trunk value if we're keeping the trunk rigid
- if self.rigid_trunk:
- u_vec[self.trunk_control_idx] = self.untucked_default_joint_pos[self.trunk_control_idx]
- u_type_vec[self.trunk_control_idx] = ControlType.POSITION
-
- # Return control
- return u_vec, u_type_vec
-
- def _filter_assisted_grasp_candidates(self, contact_dict, candidates):
- # Filter any values that are outside of the gripper's bounding box
- # Compute gripper bounding box
- corners = []
-
- eef_pos, eef_orn, _, _, _, _ = p.getLinkState(self.get_body_id(), self.eef_link_id)
- i_eef_pos, i_eef_orn = p.invertTransform(eef_pos, eef_orn)
-
- gripper_fork_1_state = p.getLinkState(self.get_body_id(), self.gripper_finger_joint_ids[0])
- local_corners = [
- [0.04, -0.012, 0.014],
- [0.04, -0.012, -0.014],
- [-0.04, -0.012, 0.014],
- [-0.04, -0.012, -0.014],
- ]
- for coord in local_corners:
- corner, _ = p.multiplyTransforms(gripper_fork_1_state[0], gripper_fork_1_state[1], coord, [0, 0, 0, 1])
- corners.append(corner)
-
- gripper_fork_2_state = p.getLinkState(self.get_body_id(), self.gripper_finger_joint_ids[1])
- local_corners = [
- [0.04, 0.012, 0.014],
- [0.04, 0.012, -0.014],
- [-0.04, 0.012, 0.014],
- [-0.04, 0.012, -0.014],
- ]
- for coord in local_corners:
- corner, _ = p.multiplyTransforms(gripper_fork_2_state[0], gripper_fork_2_state[1], coord, [0, 0, 0, 1])
- corners.append(corner)
-
- eef_local_corners = []
- for coord in corners:
- corner, _ = p.multiplyTransforms(i_eef_pos, i_eef_orn, coord, [0, 0, 0, 1])
- eef_local_corners.append(corner)
-
- eef_local_corners = np.stack(eef_local_corners)
- for candidate in candidates:
- new_contact_point_data = []
- for contact_point_data in contact_dict[candidate]:
- pos = contact_point_data["contact_position"]
- local_pos, _ = p.multiplyTransforms(i_eef_pos, i_eef_orn, pos, [0, 0, 0, 1])
- x_inside = np.min(eef_local_corners[:, 0]) < local_pos[0] < np.max(eef_local_corners[:, 0])
- y_inside = np.min(eef_local_corners[:, 1]) < local_pos[1] < np.max(eef_local_corners[:, 1])
- z_inside = np.min(eef_local_corners[:, 2]) < local_pos[2] < np.max(eef_local_corners[:, 2])
- if x_inside and y_inside and z_inside:
- new_contact_point_data.append(contact_point_data)
- contact_dict[candidate] = new_contact_point_data
-
- return contact_dict
-
- def _get_proprioception_dict(self):
- dic = super()._get_proprioception_dict()
-
- # Add trunk info
- dic["trunk_qpos"] = self.joint_positions[self.trunk_control_idx]
- dic["trunk_qvel"] = self.joint_velocities[self.trunk_control_idx]
-
- return dic
-
- @property
- def default_proprio_obs(self):
- obs_keys = super().default_proprio_obs
- return obs_keys + ["trunk_qpos"]
-
- @property
- def controller_order(self):
- # Ordered by general robot kinematics chain
- return ["base", "camera", "arm", "gripper"]
-
- @property
- def _default_controllers(self):
- # Always call super first
- controllers = super()._default_controllers
-
- # We use parallel jaw gripper, differential drive, and IK controllers as default
- controllers["base"] = "DifferentialDriveController"
- controllers["camera"] = "JointController"
- controllers["arm"] = "InverseKinematicsController"
- controllers["gripper"] = "ParallelJawGripperController"
-
- return controllers
-
- @property
- def _default_controller_config(self):
- # Grab defaults from super method first
- cfg = super()._default_controller_config
-
- # Use default IK controller -- also need to override joint idx being controlled to include trunk in default
- # IK arm controller
- cfg["arm"]["InverseKinematicsController"]["joint_idx"] = np.concatenate(
- [self.trunk_control_idx, self.arm_control_idx]
- )
-
- # If using rigid trunk, we also clamp its limits
- if self.rigid_trunk:
- cfg["arm"]["InverseKinematicsController"]["control_limits"]["position"][0][
- self.trunk_control_idx
- ] = self.untucked_default_joint_pos[self.trunk_control_idx]
- cfg["arm"]["InverseKinematicsController"]["control_limits"]["position"][1][
- self.trunk_control_idx
- ] = self.untucked_default_joint_pos[self.trunk_control_idx]
-
- return cfg
-
- @property
- def default_joint_pos(self):
- return self.untucked_default_joint_pos
-
- @property
- def wheel_radius(self):
- return 0.0613
-
- @property
- def wheel_axle_length(self):
- return 0.372
-
- @property
- def gripper_link_to_grasp_point(self):
- return np.array([0.1, 0, 0])
-
- @property
- def base_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
- """
- return np.array([0, 1])
-
- @property
- def trunk_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to trunk joint.
- """
- return np.array([2])
-
- @property
- def camera_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to [tilt, pan] camera joints.
- """
- return np.array([3, 4])
-
- @property
- def arm_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to arm joints.
- """
- return np.array([5, 6, 7, 8, 9, 10, 11])
-
- @property
- def gripper_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to gripper joints.
- """
- return np.array([12, 13])
-
- @property
- def disabled_collision_pairs(self):
- return [
- ["torso_lift_joint", "shoulder_lift_joint"],
- ["torso_lift_joint", "torso_fixed_joint"],
- ["caster_wheel_joint", "estop_joint"],
- ["caster_wheel_joint", "laser_joint"],
- ["caster_wheel_joint", "torso_fixed_joint"],
- ["caster_wheel_joint", "l_wheel_joint"],
- ["caster_wheel_joint", "r_wheel_joint"],
- ]
-
- @property
- def eef_link_name(self):
- return "gripper_link"
-
- @property
- def finger_link_names(self):
- return ["r_gripper_finger_link", "l_gripper_finger_link"]
-
- @property
- def finger_joint_names(self):
- return ["r_gripper_finger_joint", "l_gripper_finger_joint"]
-
- @property
- def model_file(self):
- return os.path.join(igibson.assets_path, "models/fetch/fetch_gripper.urdf")
diff --git a/igibson/robots/fetch_gripper_robot.py b/igibson/robots/fetch_gripper_robot.py
new file mode 100644
index 000000000..3d413da22
--- /dev/null
+++ b/igibson/robots/fetch_gripper_robot.py
@@ -0,0 +1,776 @@
+import gym
+import numpy as np
+import pybullet as p
+
+import igibson.utils.transform_utils as T
+from igibson.controllers.ik_controller import IKController
+from igibson.external.pybullet_tools.utils import (
+ get_child_frame_pose,
+ get_constraint_violation,
+ get_joint_info,
+ get_relative_pose,
+ joints_from_names,
+ set_coll_filter,
+ set_joint_positions,
+)
+from igibson.robots.robot_locomotor import LocomotorRobot
+
+# Assisted grasping parameters
+ASSIST_FRACTION = 1.0
+ARTICULATED_ASSIST_FRACTION = 0.7
+MIN_ASSIST_FORCE = 0
+MAX_ASSIST_FORCE = 500
+ASSIST_FORCE = MIN_ASSIST_FORCE + (MAX_ASSIST_FORCE - MIN_ASSIST_FORCE) * ASSIST_FRACTION
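+# With the defaults above ASSIST_FORCE resolves to 500; grasps on articulated links are
+# capped at ASSIST_FORCE * ARTICULATED_ASSIST_FRACTION = 350 (see establish_grasp below).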
+CONSTRAINT_VIOLATION_THRESHOLD = 0.1
+RELEASE_WINDOW = 1 / 30.0 # release window in seconds
+
+# GRIPPER index constants
+GRIPPER_BASE_CENTER_OFFSET = [0.1, 0, 0]
+
+
+class FetchGripper(LocomotorRobot):
+ """
+ Fetch Robot
+ Reference: https://fetchrobotics.com/robotics-platforms/fetch-mobile-manipulator/
+ Uses joint velocity control
+ """
+
+ def __init__(self, simulator, config):
+ self.simulator = simulator
+ self.config = config
+ self.linear_velocity = config.get("linear_velocity", 1.0) # m/s
+ self.angular_velocity = config.get("angular_velocity", np.pi) # rad/second
+ self.head_velocity = config.get("head_velocity", 1.0) # 1.0 represents maximum joint velocity
+ self.arm_delta_pos_velocity = config.get("arm_delta_pos_velocity", 0.25) # delta_pos = 0.25m
+ self.arm_delta_orn_velocity = config.get("arm_delta_orn_velocity", np.deg2rad(30)) # delta_orn = 30deg
+ self.gripper_velocity = config.get("gripper_velocity", 1.0) # 1.0 represents maximum joint velocity
+ self.default_arm_pose = config.get("default_arm_pose", "vertical")
+ self.trunk_offset = config.get("trunk_offset", 0.0)
+ self.use_ag = config.get("use_ag", True) # Use assisted grasping
+ self.ag_strict_mode = config.get("ag_strict_mode", True) # Require object to be contained by forks for AG
+ self.wheel_dim = 2
+ self.head_dim = 2
+ self.arm_delta_pos_dim = 3
+ self.arm_delta_orn_dim = 3
+ self.gripper_dim = 1
+ self.wheel_axle_half = 0.186 # half of the distance between the wheels
+ self.wheel_radius = 0.0613 # radius of the wheels
+ self.head_limit_epsilon = 1e-2
+ self.gripper_limit_epsilon = 1e-2
+
+ self.wheel_joint_ids = np.array([1, 2])
+ self.head_joint_ids = np.array([4, 5])
+ self.arm_joint_ids = np.array([3, 12, 13, 14, 15, 16, 17, 18])
+ self.gripper_joint_ids = np.array([19, 20, 21])
+ self.gripper_finger_joint_ids = np.array([20, 21])
+
+ self.wheel_joint_action_idx = [i for i, idn in enumerate(self.joint_ids) if idn in self.wheel_joint_ids]
+ self.head_joint_action_idx = [i for i, idn in enumerate(self.joint_ids) if idn in self.head_joint_ids]
+ self.arm_joint_action_idx = [i for i, idn in enumerate(self.joint_ids) if idn in self.arm_joint_ids]
+ self.gripper_joint_action_idx = [
+ i for i, idn in enumerate(self.joint_ids) if idn in self.gripper_finger_joint_ids
+ ]
+
+ LocomotorRobot.__init__(
+ self,
+ "fetch/fetch_gripper.urdf",
+ action_dim=self.wheel_dim
+ + self.head_dim
+ + self.arm_delta_pos_dim
+ + self.arm_delta_orn_dim
+ + self.gripper_dim,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", False),
+ control=["differential_drive"] * 2 + ["velocity"] * 12,
+ self_collision=False,
+ )
+
+ # Assistive grasp params
+ self.object_in_hand = None
+ self.obj_cid = None
+ self.obj_cid_params = {}
+ self.should_freeze_joints = False
+ self.release_counter = None
+ self.freeze_vals = {}
+
+ @property
+ def joint_ids(self):
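+ # Grouping (see the id arrays set up in __init__): 1-2 wheels, 4-5 head,
+ # 3 and 12-18 trunk + arm, 20-21 gripper fingers.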
+ return np.array([1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 17, 18, 20, 21])
+
+ @property
+ def joint_damping(self):
+ return np.array([get_joint_info(self.robot_ids[0], joint_id)[6] for joint_id in self.joint_ids])
+
+ @property
+ def num_joints(self):
+ return len(self.joint_ids)
+
+ @property
+ def lower_joint_limits(self):
+ return np.array(
+ [
+ -100.0,
+ -100.0,
+ 0.0,
+ -1.57,
+ -0.76,
+ -1.6056,
+ -1.221,
+ -100.0,
+ -2.251,
+ -100.0,
+ -2.16,
+ -100.0,
+ 0.0,
+ 0.0,
+ ]
+ )
+
+ @property
+ def upper_joint_limits(self):
+ return np.array(
+ [
+ 100.0,
+ 100.0,
+ 0.38615,
+ 1.57,
+ 1.45,
+ 1.6056,
+ 1.518,
+ 100.0,
+ 2.251,
+ 100.0,
+ 2.16,
+ 100.0,
+ 0.05,
+ 0.05,
+ ]
+ )
+
+ @property
+ def joint_range(self):
+ return self.upper_joint_limits - self.lower_joint_limits
+
+ @property
+ def max_joint_velocities(self):
+ return np.array(
+ [
+ 17.4,
+ 17.4,
+ 0.1,
+ 1.57,
+ 1.57,
+ 1.256,
+ 1.454,
+ 1.571,
+ 1.521,
+ 1.571,
+ 2.268,
+ 2.268,
+ 0.05,
+ 0.05,
+ ]
+ )
+
+ @property
+ def eef_link_id(self):
+ """
+ Link corresponding to eef
+ """
+ return 19
+
+ @property
+ def tucked_default_joints(self):
+ return np.array(
+ [
+ 0.0,
+ 0.0, # wheels
+ 0.02, # trunk
+ 0.0,
+ 0.0, # head
+ 1.1707963267948966,
+ 1.4707963267948965,
+ -0.4,
+ 1.6707963267948966,
+ 0.0,
+ 1.5707963267948966,
+ 0.0, # arm
+ 0.05,
+ 0.05, # gripper
+ ]
+ )
+
+ @property
+ def untucked_default_joints(self):
+ if self.default_arm_pose == "vertical":
+ pose = np.array(
+ [
+ 0.0,
+ 0.0, # wheels
+ 0.3 + self.trunk_offset, # trunk
+ 0.0,
+ 0.45, # head
+ -0.94121,
+ -0.64134,
+ 1.55186,
+ 1.65672,
+ -0.93218,
+ 1.53416,
+ 2.14474, # arm
+ 0.05,
+ 0.05, # gripper
+ ]
+ )
+ elif self.default_arm_pose == "diagonal15":
+ pose = np.array(
+ [
+ 0.0,
+ 0.0, # wheels
+ 0.3 + self.trunk_offset, # trunk
+ 0.0,
+ 0.45, # head
+ -0.95587,
+ -0.34778,
+ 1.46388,
+ 1.47821,
+ -0.93813,
+ 1.4587,
+ 1.9939, # arm
+ 0.05,
+ 0.05, # gripper
+ ]
+ )
+ elif self.default_arm_pose == "diagonal30":
+ pose = np.array(
+ [
+ 0.0,
+ 0.0, # wheels
+ 0.3 + self.trunk_offset, # trunk
+ 0.0,
+ 0.45, # head
+ -1.06595,
+ -0.22184,
+ 1.53448,
+ 1.46076,
+ -0.84995,
+ 1.36904,
+ 1.90996, # arm
+ 0.05,
+ 0.05, # gripper
+ ]
+ )
+ elif self.default_arm_pose == "diagonal45":
+ pose = np.array(
+ [
+ 0.0,
+ 0.0, # wheels
+ 0.3 + self.trunk_offset, # trunk
+ 0.0,
+ 0.45, # head
+ -1.11479,
+ -0.0685,
+ 1.5696,
+ 1.37304,
+ -0.74273,
+ 1.3983,
+ 1.79618, # arm
+ 0.05,
+ 0.05, # gripper
+ ]
+ )
+ else: # horizontal
+ pose = np.array(
+ [
+ 0.0,
+ 0.0, # wheels
+ 0.3 + self.trunk_offset, # trunk
+ 0.0,
+ 0.45, # head
+ -1.43016,
+ 0.20965,
+ 1.86816,
+ 1.77576,
+ -0.27289,
+ 1.31715,
+ 2.01226, # arm
+ 0.05,
+ 0.05, # gripper
+ ]
+ )
+
+ return pose
+
+ def force_wakeup(self):
+ """
+ compatibility hack - mjlbach
+ """
+ pass
+
+ def get_proprioception_dim(self):
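+ # Breakdown (assuming each Joint.get_state() returns position, velocity and torque):
+ # 3 (relative EEF position) + 3 (relative EEF orientation, Euler) + 1 (has_grasped)
+ # + 14 joints * 3 values = 49.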
+ return 49
+
+ def get_proprioception(self):
+ relative_eef_pos = self.get_relative_eef_position()
+ relative_eef_orn = p.getEulerFromQuaternion(self.get_relative_eef_orientation())
+ joint_states = np.array([j.get_state() for j in self.ordered_joints]).astype(np.float32).flatten()
+ ag_data = self.calculate_ag_object()
+ has_grasped = np.array([ag_data is not None]).astype(np.float32)
+ self.ag_data = ag_data
+ return np.concatenate([relative_eef_pos, relative_eef_orn, has_grasped, joint_states])
+
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_high = np.array(
+ [self.linear_velocity]
+ + [self.angular_velocity]
+ + [self.head_velocity] * self.head_dim
+ + [self.arm_delta_pos_velocity] * self.arm_delta_pos_dim
+ + [self.arm_delta_orn_velocity] * self.arm_delta_orn_dim
+ + [self.gripper_velocity] * self.gripper_dim
+ )
+ self.action_low = -self.action_high
+ self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
+
+ def set_up_discrete_action_space(self):
+ """
+ Set up discrete action space
+ """
+ assert False, "Fetch does not support discrete actions"
+
+ def robot_specific_reset(self):
+ """
+ Fetch robot specific reset.
+ Reset the torso lift joint and tuck the arm towards the body
+ """
+ super(FetchGripper, self).robot_specific_reset()
+
+ joints = self.untucked_default_joints
+ set_joint_positions(self.robot_ids[0], self.joint_ids, joints)
+
+ self.controller.reset()
+
+ def get_end_effector_position(self):
+ """
+ Get end-effector position
+ """
+ return self.parts["gripper_link"].get_position()
+
+ def end_effector_part_index(self):
+ """
+ Get end-effector link id
+ """
+ return self.parts["gripper_link"].body_part_index
+
+ def get_relative_eef_pose(self):
+ """
+ Get relative end-effector pose wrt robot base (returns 4x4 homogenous array)
+ """
+ return T.pose2mat(get_relative_pose(body=self.robot_ids[0], link1=self.eef_link_id))
+
+ def get_relative_eef_position(self):
+ """
+ Get relative end-effector position wrt robot base
+ """
+ return self.get_relative_eef_pose()[:3, -1]
+
+ def get_relative_eef_orientation(self):
+ """
+ Get relative end-effector orientation wrt robot base, in quaternion form
+ """
+ return T.mat2quat(self.get_relative_eef_pose()[:3, :3])
+
+ def load(self):
+ """
+ Load the robot into pybullet. Filter out unnecessary self collision
+ due to modeling imperfection in the URDF
+ """
+ ids = super(FetchGripper, self).load()
+ robot_id = self.robot_ids[0]
+
+ disable_collision_names = [
+ ["torso_lift_joint", "shoulder_lift_joint"],
+ ["torso_lift_joint", "torso_fixed_joint"],
+ ["caster_wheel_joint", "estop_joint"],
+ ["caster_wheel_joint", "laser_joint"],
+ ["caster_wheel_joint", "torso_fixed_joint"],
+ ["caster_wheel_joint", "l_wheel_joint"],
+ ["caster_wheel_joint", "r_wheel_joint"],
+ ]
+ for names in disable_collision_names:
+ link_a, link_b = joints_from_names(robot_id, names)
+ p.setCollisionFilterPair(robot_id, robot_id, link_a, link_b, 0)
+
+ self.controller = IKController(robot=self, config=self.config)
+
+ # Increase lateral friction for end effector
+ for link in self.gripper_joint_ids:
+ p.changeDynamics(self.get_body_id(), link, lateralFriction=500)
+
+ return ids
+
+ def apply_action(self, action):
+ """
+ Apply policy action
+ """
+ if self.use_ag:
+ self.handle_assisted_grasping(action)
+
+ real_action = self.policy_action_to_robot_action(action)
+ self.apply_robot_action(real_action)
+ if self.should_freeze_joints:
+ self.freeze_joints()
+
+ def policy_action_to_robot_action(self, action):
+ self.calc_state()
+ # The policy action is 11-dimensional: 2 (base) + 2 (head) + 6 (EEF delta pose) + 1 (gripper)
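+ # Illustrative index mapping (derived from the slicing below):
+ #   [0:2] base linear / angular velocity, [2:4] head joint velocities,
+ #   [4:7] EEF delta position, [7:10] EEF delta orientation,
+ #   [10] gripper (> 0 opens / releases, < 0 closes / grasps)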
+ robot_action = super(FetchGripper, self).policy_action_to_robot_action(action)
+ new_robot_action = np.zeros(self.num_joints)
+
+ # dim 0 and 1: linear and angular velocities of robot base
+ new_robot_action[self.wheel_joint_action_idx] = robot_action[:2]
+
+ # dim 2 and 3: head joint velocities
+ current_joint_position = np.array(
+ [item[0] for item in p.getJointStates(self.robot_ids[0], self.head_joint_ids)]
+ )
+ violate_lower_limit = current_joint_position < (
+ self.lower_joint_limits[self.head_joint_action_idx] + self.head_limit_epsilon
+ )
+ negative_action = robot_action[2:4] < 0.0
+
+ violate_upper_limit = current_joint_position > (
+ self.upper_joint_limits[self.head_joint_action_idx] - self.head_limit_epsilon
+ )
+ positive_action = robot_action[2:4] > 0.0
+
+ # zero out head movement velocity if it gets close to the joint limit
+ violation = np.logical_or(violate_lower_limit * negative_action, violate_upper_limit * positive_action)
+ new_robot_action[self.head_joint_action_idx] = robot_action[2:4] * (~violation)
+
+ # dim 4-9: eef delta pos and orn
+ new_robot_action[self.arm_joint_action_idx] = (
+ self.controller.control(robot_action[4:10])[self.arm_joint_action_idx]
+ / self.max_joint_velocities[self.arm_joint_action_idx]
+ )
+
+ # dim 10: gripper open/close (with maximum joint velocity)
+ if robot_action[10] > 0.0:
+ new_robot_action[self.gripper_joint_action_idx] = 1.0
+ else:
+ new_robot_action[self.gripper_joint_action_idx] = -1.0
+
+ current_joint_position = np.array(
+ [item[0] for item in p.getJointStates(self.robot_ids[0], self.gripper_finger_joint_ids)]
+ )
+ violate_lower_limit = current_joint_position < (
+ self.lower_joint_limits[self.gripper_joint_action_idx] + self.gripper_limit_epsilon
+ )
+ negative_action = new_robot_action[self.gripper_joint_action_idx] < 0.0
+
+ violate_upper_limit = current_joint_position > (
+ self.upper_joint_limits[self.gripper_joint_action_idx] - self.gripper_limit_epsilon
+ )
+ positive_action = new_robot_action[self.gripper_joint_action_idx] > 0.0
+
+ # zero out gripper velocity if it gets close to the joint limit
+ violation = np.logical_or(violate_lower_limit * negative_action, violate_upper_limit * positive_action)
+ new_robot_action[self.gripper_joint_action_idx] *= ~violation
+
+ return new_robot_action
+
+ def calculate_ag_object(self):
+ """
+ Calculates which object to grasp via assisted grasping. Returns an (object_id, link_id) tuple or None
+ if no valid AG-enabled object can be found.
+ """
+ # Step 1 - find all objects in contact with both gripper forks
+ gripper_fork_1_contacts = p.getContactPoints(
+ bodyA=self.get_body_id(), linkIndexA=self.gripper_finger_joint_ids[0]
+ )
+ gripper_fork_2_contacts = p.getContactPoints(
+ bodyA=self.get_body_id(), linkIndexA=self.gripper_finger_joint_ids[1]
+ )
+
+ contact_dict = {}
+ set_1_contacts = set()
+ for contact in gripper_fork_1_contacts:
+ set_1_contacts.add(contact[2])
+ if contact[2] not in contact_dict:
+ contact_dict[contact[2]] = []
+ contact_dict[contact[2]].append({"contact_position": contact[5], "target_link": contact[4]})
+
+ set_2_contacts = set()
+ for contact in gripper_fork_2_contacts:
+ set_2_contacts.add(contact[2])
+ if contact[2] not in contact_dict:
+ contact_dict[contact[2]] = []
+ contact_dict[contact[2]].append({"contact_position": contact[5], "target_link": contact[4]})
+
+ candidates = list(set_1_contacts.intersection(set_2_contacts))
+
+ if len(candidates) == 0:
+ return None
+
+ # Step 2, check if contact with target is inside bounding box
+ # It might be easier to check whether the contact normal from getContactPoints points
+ # towards or away from the center of the gripper
+
+ if self.ag_strict_mode:
+ # Compute gripper bounding box
+ corners = []
+
+ eef_pos, eef_orn, _, _, _, _ = p.getLinkState(self.get_body_id(), self.eef_link_id)
+ i_eef_pos, i_eef_orn = p.invertTransform(eef_pos, eef_orn)
+
+ gripper_fork_1_state = p.getLinkState(self.get_body_id(), self.gripper_finger_joint_ids[0])
+ local_corners = [
+ [0.04, -0.012, 0.014],
+ [0.04, -0.012, -0.014],
+ [-0.04, -0.012, 0.014],
+ [-0.04, -0.012, -0.014],
+ ]
+ for coord in local_corners:
+ corner, _ = p.multiplyTransforms(gripper_fork_1_state[0], gripper_fork_1_state[1], coord, [0, 0, 0, 1])
+ corners.append(corner)
+
+ gripper_fork_2_state = p.getLinkState(self.get_body_id(), self.gripper_finger_joint_ids[1])
+ local_corners = [
+ [0.04, 0.012, 0.014],
+ [0.04, 0.012, -0.014],
+ [-0.04, 0.012, 0.014],
+ [-0.04, 0.012, -0.014],
+ ]
+ for coord in local_corners:
+ corner, _ = p.multiplyTransforms(gripper_fork_2_state[0], gripper_fork_2_state[1], coord, [0, 0, 0, 1])
+ corners.append(corner)
+
+ eef_local_corners = []
+ for coord in corners:
+ corner, _ = p.multiplyTransforms(i_eef_pos, i_eef_orn, coord, [0, 0, 0, 1])
+ eef_local_corners.append(corner)
+
+ eef_local_corners = np.stack(eef_local_corners)
+ for candidate in candidates:
+ new_contact_point_data = []
+ for contact_point_data in contact_dict[candidate]:
+ pos = contact_point_data["contact_position"]
+ local_pos, _ = p.multiplyTransforms(i_eef_pos, i_eef_orn, pos, [0, 0, 0, 1])
+ x_inside = np.min(eef_local_corners[:, 0]) < local_pos[0] < np.max(eef_local_corners[:, 0])
+ y_inside = np.min(eef_local_corners[:, 1]) < local_pos[1] < np.max(eef_local_corners[:, 1])
+ z_inside = np.min(eef_local_corners[:, 2]) < local_pos[2] < np.max(eef_local_corners[:, 2])
+ if x_inside and y_inside and z_inside:
+ new_contact_point_data.append(contact_point_data)
+ contact_dict[candidate] = new_contact_point_data
+
+ # Step 3 - find the closest object to the gripper center among these "inside" objects
+ gripper_state = p.getLinkState(self.get_body_id(), self.eef_link_id)
+ # Compute gripper bounding box
+ gripper_center_pos = np.copy(GRIPPER_BASE_CENTER_OFFSET)
+ gripper_center_pos, _ = p.multiplyTransforms(
+ gripper_state[0], gripper_state[1], gripper_center_pos, [0, 0, 0, 1]
+ )
+
+ self.candidate_data = []
+ for candidate in candidates:
+ for contact_point_data in contact_dict[candidate]:
+ dist = np.linalg.norm(np.array(contact_point_data["contact_position"]) - np.array(gripper_center_pos))
+ self.candidate_data.append((candidate, contact_point_data["target_link"], dist))
+
+ self.candidate_data = sorted(self.candidate_data, key=lambda x: x[2])
+ if len(self.candidate_data) > 0:
+ ag_bid, ag_link, _ = self.candidate_data[0]
+ else:
+ return None
+
+ # Return None if any of the following edge cases are activated
+ if not self.simulator.can_assisted_grasp(ag_bid, ag_link) or (self.get_body_id() == ag_bid):
+ return None
+
+ return ag_bid, ag_link
+
+ def release_grasp(self):
+ p.removeConstraint(self.obj_cid)
+ self.obj_cid = None
+ self.obj_cid_params = {}
+ self.should_freeze_joints = False
+ self.release_counter = 0
+
+ def handle_release_window(self):
+ self.release_counter += 1
+ time_since_release = self.release_counter * self.simulator.render_timestep
+ if time_since_release >= RELEASE_WINDOW:
+ set_coll_filter(
+ target_id=self.object_in_hand,
+ body_id=self.get_body_id(),
+ body_links=self.gripper_joint_ids,
+ enable=True,
+ )
+ self.object_in_hand = None
+ self.release_counter = None
+
+ def freeze_joints(self):
+ """
+ Freezes gripper finger joints - used in assisted grasping.
+ """
+ for joint_index, j_val in self.freeze_vals.items():
+ p.resetJointState(self.get_body_id(), joint_index, targetValue=j_val, targetVelocity=0.0)
+
+ def establish_grasp(self, ag_data):
+ ag_bid, ag_link = ag_data
+
+ child_frame_pos, child_frame_orn = get_child_frame_pose(
+ parent_bid=self.get_body_id(), parent_link=self.eef_link_id, child_bid=ag_bid, child_link=ag_link
+ )
+
+ # If we grab a child link of a URDF, create a p2p joint
+ if ag_link == -1:
+ joint_type = p.JOINT_FIXED
+ else:
+ joint_type = p.JOINT_POINT2POINT
+
+ self.obj_cid = p.createConstraint(
+ parentBodyUniqueId=self.get_body_id(),
+ parentLinkIndex=self.eef_link_id,
+ childBodyUniqueId=ag_bid,
+ childLinkIndex=ag_link,
+ jointType=joint_type,
+ jointAxis=(0, 0, 0),
+ parentFramePosition=(0, 0, 0),
+ childFramePosition=child_frame_pos,
+ childFrameOrientation=child_frame_orn,
+ )
+ # Modify max force based on user-determined assist parameters
+ if ag_link == -1:
+ max_force = ASSIST_FORCE
+ else:
+ max_force = ASSIST_FORCE * ARTICULATED_ASSIST_FRACTION
+ p.changeConstraint(self.obj_cid, maxForce=max_force)
+
+ self.obj_cid_params = {
+ "childBodyUniqueId": ag_bid,
+ "childLinkIndex": ag_link,
+ "jointType": joint_type,
+ "maxForce": max_force,
+ }
+ self.object_in_hand = ag_bid
+ self.should_freeze_joints = True
+ # Disable collisions while picking things up
+ set_coll_filter(target_id=ag_bid, body_id=self.get_body_id(), body_links=self.gripper_joint_ids, enable=False)
+ for joint_index in self.gripper_finger_joint_ids:
+ j_val = p.getJointState(self.get_body_id(), joint_index)[0]
+ self.freeze_vals[joint_index] = j_val
+
+ def handle_assisted_grasping(self, action):
+ """
+ Handles assisted grasping.
+ :param action: numpy array of actions.
+ """
+
+ applying_grasp = action[10] < 0.0
+ releasing_grasp = action[10] > 0.0
+
+ # Execute gradual release of object
+ if self.object_in_hand is not None and self.release_counter is not None:
+ self.handle_release_window()
+
+ elif self.object_in_hand and self.release_counter is None:
+ constraint_violated = get_constraint_violation(self.obj_cid) > CONSTRAINT_VIOLATION_THRESHOLD
+ if constraint_violated or releasing_grasp:
+ self.release_grasp()
+
+ elif not self.object_in_hand and applying_grasp:
+ ag_data = self.calculate_ag_object()
+ if ag_data:
+ self.establish_grasp(ag_data)
+
+ def is_grasping(self, candidate_obj):
+ return np.array([self.object_in_hand == candidate_obj])
+
+ # significant overlap with BehaviorRobot
+ def dump_state(self):
+ if not self.use_ag:
+ return None
+
+ # Recompute the child frame pose because it may have changed since the
+ # constraint was created
+ if self.obj_cid is not None:
+ ag_bid = self.obj_cid_params["childBodyUniqueId"]
+ ag_link = self.obj_cid_params["childLinkIndex"]
+ child_frame_pos, child_frame_orn = get_child_frame_pose(
+ parent_bid=self.get_body_id(), parent_link=self.eef_link_id, child_bid=ag_bid, child_link=ag_link
+ )
+ self.obj_cid_params.update(
+ {
+ "childFramePosition": child_frame_pos,
+ "childFrameOrientation": child_frame_orn,
+ }
+ )
+
+ return {
+ "object_in_hand": self.object_in_hand,
+ "release_counter": self.release_counter,
+ "should_freeze_joints": self.should_freeze_joints,
+ "freeze_vals": self.freeze_vals,
+ "obj_cid": self.obj_cid,
+ "obj_cid_params": self.obj_cid_params,
+ }
+
+ def load_state(self, dump):
+ if not self.use_ag:
+ return
+
+ # Cancel the previous AG if exists
+ if self.obj_cid is not None:
+ p.removeConstraint(self.obj_cid)
+
+ if self.object_in_hand is not None:
+ set_coll_filter(
+ target_id=self.object_in_hand,
+ body_id=self.get_body_id(),
+ body_links=self.gripper_joint_ids,
+ enable=True,
+ )
+
+ self.object_in_hand = dump["object_in_hand"]
+ self.release_counter = dump["release_counter"]
+ self.should_freeze_joints = dump["should_freeze_joints"]
+ self.freeze_vals = {int(key): val for key, val in dump["freeze_vals"].items()}
+ self.obj_cid = dump["obj_cid"]
+ self.obj_cid_params = dump["obj_cid_params"]
+ if self.obj_cid is not None:
+ self.obj_cid = p.createConstraint(
+ parentBodyUniqueId=self.get_body_id(),
+ parentLinkIndex=self.eef_link_id,
+ childBodyUniqueId=dump["obj_cid_params"]["childBodyUniqueId"],
+ childLinkIndex=dump["obj_cid_params"]["childLinkIndex"],
+ jointType=dump["obj_cid_params"]["jointType"],
+ jointAxis=(0, 0, 0),
+ parentFramePosition=(0, 0, 0),
+ childFramePosition=dump["obj_cid_params"]["childFramePosition"],
+ childFrameOrientation=dump["obj_cid_params"]["childFrameOrientation"],
+ )
+ p.changeConstraint(self.obj_cid, maxForce=dump["obj_cid_params"]["maxForce"])
+
+ if self.object_in_hand is not None:
+ set_coll_filter(
+ target_id=self.object_in_hand,
+ body_id=self.get_body_id(),
+ body_links=self.gripper_joint_ids,
+ enable=False,
+ )
+
+ def can_toggle(self, toggle_position, toggle_distance_threshold):
+ for joint_id in self.gripper_joint_ids:
+ finger_link_state = p.getLinkState(self.get_body_id(), joint_id)
+ link_pos = finger_link_state[0]
+ if np.linalg.norm(np.array(link_pos) - np.array(toggle_position)) < toggle_distance_threshold:
+ return True
+ return False
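+
+
+# Minimal usage sketch (hypothetical; the Simulator construction and config keys below are
+# assumptions for illustration, not part of this module):
+#
+#   from igibson.simulator import Simulator
+#   s = Simulator(mode="headless")
+#   robot = FetchGripper(s, {"default_arm_pose": "vertical", "use_ag": True})
+#   s.import_robot(robot)
+#   robot.apply_action(np.zeros(11))  # 11-D continuous action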
diff --git a/igibson/robots/fetch_robot.py b/igibson/robots/fetch_robot.py
new file mode 100644
index 000000000..ff7157e8d
--- /dev/null
+++ b/igibson/robots/fetch_robot.py
@@ -0,0 +1,117 @@
+import gym
+import numpy as np
+import pybullet as p
+
+from igibson.external.pybullet_tools.utils import joints_from_names, set_joint_positions
+from igibson.robots.robot_locomotor import LocomotorRobot
+
+
+class Fetch(LocomotorRobot):
+ """
+ Fetch Robot
+ Reference: https://fetchrobotics.com/robotics-platforms/fetch-mobile-manipulator/
+ Uses joint velocity control
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.wheel_velocity = config.get("wheel_velocity", 1.0)
+ self.torso_lift_velocity = config.get("torso_lift_velocity", 1.0)
+ self.arm_velocity = config.get("arm_velocity", 1.0)
+ self.wheel_dim = 2
+ self.torso_lift_dim = 1
+ self.arm_dim = 7
+ LocomotorRobot.__init__(
+ self,
+ "fetch/fetch.urdf",
+ action_dim=self.wheel_dim + self.torso_lift_dim + self.arm_dim,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", False),
+ control="velocity",
+ self_collision=True,
+ )
+
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_high = np.array(
+ [self.wheel_velocity] * self.wheel_dim
+ + [self.torso_lift_velocity] * self.torso_lift_dim
+ + [self.arm_velocity] * self.arm_dim
+ )
+ self.action_low = -self.action_high
+ self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
+
+ def set_up_discrete_action_space(self):
+ """
+ Set up discrete action space
+ """
+ assert False, "Fetch does not support discrete actions"
+
+ def robot_specific_reset(self):
+ """
+ Fetch robot specific reset.
+ Reset the torso lift joint and tuck the arm towards the body
+ """
+ super(Fetch, self).robot_specific_reset()
+
+ # Tuck the arm towards the body
+ robot_id = self.robot_ids[0]
+ arm_joints = joints_from_names(
+ robot_id,
+ [
+ "torso_lift_joint",
+ "shoulder_pan_joint",
+ "shoulder_lift_joint",
+ "upperarm_roll_joint",
+ "elbow_flex_joint",
+ "forearm_roll_joint",
+ "wrist_flex_joint",
+ "wrist_roll_joint",
+ ],
+ )
+
+ rest_position = (0.02, np.pi / 2.0 - 0.4, np.pi / 2.0 - 0.1, -0.4, np.pi / 2.0 + 0.1, 0.0, np.pi / 2.0, 0.0)
+ # might be a better pose to initiate manipulation
+ # rest_position = (0.30322468280792236, -1.414019864768982,
+ # 1.5178184935241699, 0.8189625336474915,
+ # 2.200358942909668, 2.9631312579803466,
+ # -1.2862852996643066, 0.0008453550418615341)
+
+ set_joint_positions(robot_id, arm_joints, rest_position)
+
+ def get_end_effector_position(self):
+ """
+ Get end-effector position
+ """
+ return self.parts["gripper_link"].get_position()
+
+ def end_effector_part_index(self):
+ """
+ Get end-effector link id
+ """
+ return self.parts["gripper_link"].body_part_index
+
+ def load(self):
+ """
+ Load the robot into pybullet. Filter out unnecessary self collision
+ due to modeling imperfection in the URDF
+ """
+ ids = super(Fetch, self).load()
+ robot_id = self.robot_ids[0]
+
+ disable_collision_names = [
+ ["torso_lift_joint", "shoulder_lift_joint"],
+ ["torso_lift_joint", "torso_fixed_joint"],
+ ["caster_wheel_joint", "estop_joint"],
+ ["caster_wheel_joint", "laser_joint"],
+ ["caster_wheel_joint", "torso_fixed_joint"],
+ ["caster_wheel_joint", "l_wheel_joint"],
+ ["caster_wheel_joint", "r_wheel_joint"],
+ ]
+ for names in disable_collision_names:
+ link_a, link_b = joints_from_names(robot_id, names)
+ p.setCollisionFilterPair(robot_id, robot_id, link_a, link_b, 0)
+
+ return ids
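+
+
+# Usage sketch (hypothetical config values; `simulator` is assumed to be an igibson
+# Simulator instance):
+#
+#   robot = Fetch({"wheel_velocity": 0.5, "arm_velocity": 0.5, "is_discrete": False})
+#   simulator.import_robot(robot)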
diff --git a/igibson/robots/fetch_vr_robot.py b/igibson/robots/fetch_vr_robot.py
new file mode 100644
index 000000000..66e27291b
--- /dev/null
+++ b/igibson/robots/fetch_vr_robot.py
@@ -0,0 +1,258 @@
+import os
+
+import numpy as np
+import pybullet as p
+
+from igibson import assets_path
+from igibson.external.pybullet_tools.utils import (
+ get_joint_positions,
+ get_max_limits,
+ get_min_limits,
+ joints_from_names,
+ set_joint_positions,
+)
+from igibson.objects.visual_marker import VisualMarker
+from igibson.objects.vr_objects import VrGazeMarker
+from igibson.robots.fetch_robot import Fetch
+from igibson.robots.robot_locomotor import LocomotorRobot
+from igibson.utils.utils import parse_config
+from igibson.utils.vr_utils import calc_z_dropoff
+
+
+class FetchVR(Fetch):
+ """
+ Fetch robot used in VR embodiment demos.
+ """
+
+ def __init__(self, s, start_pos, update_freq=1, control_hand="right", use_ns_ik=True, use_gaze_marker=True):
+ config = parse_config(os.path.join(assets_path, "models", "fetch", "fetch_embodiment.yaml"))
+ self.wheel_velocity = config.get("wheel_velocity", 1.0)
+ self.torso_lift_velocity = config.get("torso_lift_velocity", 1.0)
+ self.arm_velocity = config.get("arm_velocity", 1.0)
+ self.wheel_dim = 2
+ # Torso lift has been disabled for VR, since it causes the torso to intersect the VR camera
+ self.torso_lift_dim = 0
+ # 7 for arm, 2 for gripper
+ self.arm_dim = 9
+ self.named_joint_list = [
+ "shoulder_pan_joint",
+ "shoulder_lift_joint",
+ "upperarm_roll_joint",
+ "elbow_flex_joint",
+ "forearm_roll_joint",
+ "wrist_flex_joint",
+ "wrist_roll_joint",
+ ]
+ if self.torso_lift_dim > 0:
+ self.named_joint_list += ["torso_lift_joint"]
+ LocomotorRobot.__init__(
+ self,
+ "fetch/fetch_vr.urdf",
+ action_dim=self.wheel_dim + self.torso_lift_dim + self.arm_dim,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", False),
+ control=["differential_drive", "differential_drive"] + ["position"] * (self.torso_lift_dim + self.arm_dim),
+ self_collision=True,
+ )
+
+ self.sim = s
+ self.update_freq = update_freq
+ # The hand to use to control FetchVR - this can be set to left or right based on the user's preferences
+ self.control_hand = control_hand
+ self.control_device = "{}_controller".format(self.control_hand)
+ self.height = 1.2
+ self.wheel_axle_half = 0.18738 # half of the distance between the wheels
+ self.wheel_radius = 0.054 # radius of the wheels themselves
+
+ self.sim.import_robot(self)
+
+ # Position setup
+ self.set_position(start_pos)
+ self.fetch_vr_reset()
+ self.keep_still()
+ # Update data
+ self.frame_count = 0
+
+ # Load end effector
+ self.effector_marker = VisualMarker(rgba_color=[1, 0, 1, 0.2], radius=0.025)
+ self.sim.import_object(self.effector_marker, use_pbr=False, use_pbr_mapping=False, shadow_caster=False)
+ # Hide marker upon initialization
+ self.effector_marker.set_position([0, 0, -5])
+ # Arm joints excluding wheels and gripper
+ self.arm_joints = self.ordered_joints[2:9]
+ self.gripper_max_joint = 0.05
+ # IK control start can be toggled by pressing grip of control device
+ self.use_ik_control = False
+ # Whether to use null-space IK
+ self.use_ns_ik = use_ns_ik
+ self.use_gaze_marker = use_gaze_marker
+ if self.use_gaze_marker:
+ self.gm = VrGazeMarker(s)
+
+ # Set friction of grippers
+ self.gripper_ids = joints_from_names(self.robot_id, ["l_gripper_finger_joint", "r_gripper_finger_joint"])
+ for g_id in self.gripper_ids:
+ p.changeDynamics(self.robot_id, g_id, lateralFriction=2.5)
+
+ def fetch_vr_reset(self):
+ for j in self.ordered_joints:
+ j.reset_joint_state(0.0, 0.0)
+
+ # Tuck the arm towards the body
+ self.robot_id = self.robot_ids[0]
+ self.arm_joint_ids = joints_from_names(self.robot_id, self.named_joint_list)
+ rest_position = (
+ -1.414019864768982,
+ 1.5178184935241699,
+ 0.8189625336474915,
+ 2.200358942909668,
+ 2.9631312579803466,
+ -1.2862852996643066,
+ 0.0008453550418615341,
+ )
+
+ set_joint_positions(self.robot_id, self.arm_joint_ids, rest_position)
+
+ def calc_ik_params(self):
+ max_limits = [0.0, 0.0] + get_max_limits(self.robot_id, self.arm_joint_ids) + [0.0, 0.0]
+ min_limits = [0.0, 0.0] + get_min_limits(self.robot_id, self.arm_joint_ids) + [0.0, 0.0]
+ rest_position = [0.0, 0.0] + list(get_joint_positions(self.robot_id, self.arm_joint_ids)) + [0.0, 0.0]
+ joint_range = list(np.array(max_limits) - np.array(min_limits))
+ joint_range = [item + 1 for item in joint_range]
+ joint_damping = [0.1 for _ in joint_range]
+
+ return (max_limits, min_limits, rest_position, joint_range, joint_damping)
+
+ def apply_frame_data(self, lin_vel, ang_vel, arm_poses, grip_frac):
+ """
+ Sets wheel velocity, arm positions and gripper open/close fraction each frame using data from VR system.
+ """
+ actions = np.array([lin_vel, ang_vel] + arm_poses + [grip_frac] * 2)
+ actions.reshape((actions.shape[0], 1))
+ self.apply_robot_action(actions)
+
+ def get_joint_pos(self):
+ """
+ Returns list containing all current joint positions of FetchVR robot (excluding wheels).
+ """
+ joint_pos = []
+ for n, j in enumerate(self.arm_joints):
+ j_pos, _, _ = j.get_state()
+ joint_pos.append(j_pos)
+
+ return joint_pos
+
+ def update(self, vr_data=None):
+ """
+ Updates FetchVR robot. If vr_data is supplied, overwrites VR input.
+ """
+ if vr_data:
+ hmd_is_valid, hmd_trans, hmd_rot, _, _, hmd_forward = vr_data.query("hmd")
+ hmd_world_pos, _ = vr_data.query("vr_positions")
+ transform_data = vr_data.query(self.control_device)[:3]
+ touch_data = vr_data.query("{}_button".format(self.control_device))
+ else:
+ hmd_is_valid, hmd_trans, hmd_rot = self.sim.get_data_for_vr_device("hmd")
+ _, _, hmd_forward = self.sim.get_device_coordinate_system("hmd")
+ hmd_world_pos = self.sim.get_hmd_world_pos()
+ transform_data = self.sim.get_data_for_vr_device(self.control_device)
+ touch_data = self.sim.get_button_data_for_controller(self.control_device)
+
+ is_valid, trans, rot = transform_data
+ trig_frac, touch_x, touch_y = touch_data
+
+ if hmd_is_valid:
+ # Set fetch orientation directly from HMD to avoid lag when turning and resultant motion sickness
+ self.set_z_rotation(hmd_rot, hmd_forward)
+
+ if not vr_data:
+ # Get world position and fetch position
+ fetch_pos = self.get_position()
+
+ # Calculate x and y offset to get to fetch position
+ # z offset is to the desired hmd height, corresponding to fetch head height
+ offset_to_fetch = [
+ fetch_pos[0] - hmd_world_pos[0],
+ fetch_pos[1] - hmd_world_pos[1],
+ self.height - hmd_world_pos[2],
+ ]
+ self.sim.set_vr_offset(offset_to_fetch)
+
+ if is_valid:
+ # Toggle IK on/off from grip press on control device
+ grip_press = (
+ ([self.control_device, "grip_press"] in vr_data.query("event_data"))
+ if vr_data
+ else self.sim.query_vr_event(self.control_device, "grip_press")
+ )
+ if grip_press:
+ self.use_ik_control = not self.use_ik_control
+
+ # Calculate linear and angular velocity as well as gripper positions
+ lin_vel = self.wheel_velocity * touch_y
+ ang_vel = 0
+ grip_frac = self.gripper_max_joint * (1 - trig_frac)
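+ # With gripper_max_joint = 0.05, a fully pressed trigger (trig_frac = 1) maps to 0.0
+ # (closed) and a released trigger (trig_frac = 0) maps to 0.05 (fully open).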
+
+ # Iteration and residual threshold values are based on recommendations from PyBullet
+ if self.use_ik_control and self.frame_count % self.update_freq == 0:
+ # Update effector marker to desired end-effector transform
+ self.effector_marker.set_position(trans)
+ self.effector_marker.set_orientation(rot)
+
+ if not self.use_ns_ik:
+ ik_joint_poses = p.calculateInverseKinematics(
+ self.robot_id,
+ self.end_effector_part_index(),
+ trans,
+ rot,
+ solver=0,
+ maxNumIterations=100,
+ residualThreshold=0.01,
+ )
+ else:
+ max_limits, min_limits, rest_position, joint_range, joint_damping = self.calc_ik_params()
+ ik_joint_poses = p.calculateInverseKinematics(
+ self.robot_id,
+ self.end_effector_part_index(),
+ trans,
+ rot,
+ lowerLimits=min_limits,
+ upperLimits=max_limits,
+ restPoses=rest_position,
+ jointDamping=joint_damping,
+ solver=0,
+ maxNumIterations=100,
+ residualThreshold=0.01,
+ )
+ # Exclude wheels and gripper joints
+ arm_poses = ik_joint_poses[2 : self.torso_lift_dim + self.arm_dim]
+ self.apply_frame_data(lin_vel, ang_vel, list(arm_poses), grip_frac)
+ else:
+ self.apply_frame_data(lin_vel, ang_vel, list(self.get_joint_pos()), grip_frac)
+
+ # Update gaze marker
+ if self.use_gaze_marker:
+ self.gm.update()
+
+ def set_z_rotation(self, hmd_rot, hmd_forward):
+ """
+ Sets the z rotation of the fetch VR robot using the provided HMD rotation.
+ Uses same attenuated z rotation based on verticality of HMD forward vector as VrBody class.
+ """
+ n_forward = np.array(hmd_forward)
+ # Normalized forward direction and z direction
+ n_forward = n_forward / np.linalg.norm(n_forward)
+ n_z = np.array([0.0, 0.0, 1.0])
+ # Calculate angle and convert to degrees
+ theta_z = np.arccos(np.dot(n_forward, n_z)) / np.pi * 180
+ # Move theta into range 0 to max_z
+ if theta_z > (180.0 - 45.0):
+ theta_z = 180.0 - theta_z
+ _, _, hmd_z = p.getEulerFromQuaternion(hmd_rot)
+ _, _, curr_z = p.getEulerFromQuaternion(self.get_orientation())
+ delta_z = hmd_z - curr_z
+ # Calculate z multiplication coefficient based on how much we are looking in up/down direction
+ z_mult = calc_z_dropoff(theta_z, 20.0, 45.0)
+ new_z = curr_z + delta_z * z_mult
+ fetch_rot = p.getQuaternionFromEuler([0, 0, new_z])
+ self.set_orientation(fetch_rot)
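+
+
+# Usage sketch (hypothetical; assumes a VR-enabled Simulator instance `s`):
+#
+#   robot = FetchVR(s, start_pos=[0, 0, 0], update_freq=1, control_hand="right")
+#   while True:
+#       s.step()
+#       robot.update()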
diff --git a/igibson/robots/freight.py b/igibson/robots/freight.py
deleted file mode 100644
index 12e28e6b4..000000000
--- a/igibson/robots/freight.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-
-import gym
-import numpy as np
-
-import igibson
-from igibson.robots.two_wheel_robot import TwoWheelRobot
-from igibson.utils.constants import SemanticClass
-
-
-class Freight(TwoWheelRobot):
- """
- Freight Robot
- Reference: https://fetchrobotics.com/robotics-platforms/freight-base/
- Uses joint velocity control
- """
-
- @property
- def wheel_radius(self):
- return 0.0613
-
- @property
- def wheel_axle_length(self):
- return 0.372
-
- @property
- def base_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
- """
- return np.array([0, 1])
-
- @property
- def default_joint_pos(self):
- return np.zeros(self.n_joints)
-
- @property
- def model_file(self):
- return os.path.join(igibson.assets_path, "models/fetch/freight.urdf")
diff --git a/igibson/robots/freight_robot.py b/igibson/robots/freight_robot.py
new file mode 100644
index 000000000..2eebb6caa
--- /dev/null
+++ b/igibson/robots/freight_robot.py
@@ -0,0 +1,55 @@
+import gym
+import numpy as np
+
+from igibson.robots.robot_locomotor import LocomotorRobot
+
+
+class Freight(LocomotorRobot):
+ """
+ Freight Robot
+ Reference: https://fetchrobotics.com/robotics-platforms/freight-base/
+ Uses joint velocity control
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.velocity = config.get("velocity", 1.0)
+ LocomotorRobot.__init__(
+ self,
+ "fetch/freight.urdf",
+ action_dim=2,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", False),
+ control="velocity",
+ )
+
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
+ self.action_high = self.velocity * np.ones([self.action_dim])
+ self.action_low = -self.action_high
+
+ def set_up_discrete_action_space(self):
+ """
+ Set up discrete action space
+ """
+ self.action_list = [
+ [self.velocity, self.velocity],
+ [-self.velocity, -self.velocity],
+ [self.velocity * 0.5, -self.velocity * 0.5],
+ [-self.velocity * 0.5, self.velocity * 0.5],
+ [0, 0],
+ ]
+ self.action_space = gym.spaces.Discrete(len(self.action_list))
+ self.setup_keys_to_action()
+
+ def setup_keys_to_action(self):
+ self.keys_to_action = {
+ (ord("w"),): 0, # forward
+ (ord("s"),): 1, # backward
+ (ord("d"),): 2, # turn right
+ (ord("a"),): 3, # turn left
+ (): 4, # stay still
+ }
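+
+
+# Note: with is_discrete=True the setup above yields a 5-way Discrete action space
+# (forward, backward, turn right, turn left, stay still), mirroring keys_to_action.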
diff --git a/igibson/robots/legacy/humanoid_robot.py b/igibson/robots/humanoid_robot.py
similarity index 79%
rename from igibson/robots/legacy/humanoid_robot.py
rename to igibson/robots/humanoid_robot.py
index d3f688b74..c6e98822c 100644
--- a/igibson/robots/legacy/humanoid_robot.py
+++ b/igibson/robots/humanoid_robot.py
@@ -4,41 +4,48 @@
import numpy as np
import pybullet as p
-from igibson.robots.locomotion_robot import LocomotionRobot
+from igibson.robots.robot_locomotor import LocomotorRobot
-class Humanoid(LocomotionRobot):
+class Humanoid(LocomotorRobot):
"""
OpenAI Humanoid robot
Uses joint torque control
"""
- def __init__(self, config, **kwargs):
+ def __init__(self, config):
self.config = config
+ self.torque = config.get("torque", 0.1)
self.glass_id = None
self.glass_offset = 0.3
- LocomotionRobot.__init__(
+ LocomotorRobot.__init__(
self,
"humanoid/humanoid.xml",
action_dim=17,
+ torque_coef=0.41,
scale=config.get("robot_scale", 1.0),
is_discrete=config.get("is_discrete", False),
control="torque",
self_collision=True,
- **kwargs
)
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
+ self.action_high = self.torque * np.ones([self.action_dim])
+ self.action_low = -self.action_high
+
def set_up_discrete_action_space(self):
"""
Set up discrete action space
"""
- if not self.normalize_robot_action:
- raise ValueError("discrete action only works with normalized action space")
- self.action_list = [[1.0] * self.action_dim, [0.0] * self.action_dim]
+ self.action_list = [[self.torque] * self.action_dim, [0.0] * self.action_dim]
self.action_space = gym.spaces.Discrete(len(self.action_list))
self.setup_keys_to_action()
- def reset(self):
+ def robot_specific_reset(self):
"""
Humanoid robot specific reset
Add spherical radiance/glass shield to protect the robot's camera
@@ -51,7 +58,7 @@ def reset(self):
humanoidId = i
# Spherical radiance/glass shield to protect the robot's camera
- super(Humanoid, self).reset()
+ super(Humanoid, self).robot_specific_reset()
if self.glass_id is None:
glass_path = os.path.join(self.physics_model_dir, "humanoid/glass.xml")
@@ -96,7 +103,7 @@ def apply_action(self, action):
self.apply_robot_action(real_action)
else:
for i, m, joint_torque_coef in zip(range(17), self.motors, self.motor_power):
- m.set_torque(float(joint_torque_coef * real_action[i]))
+ m.set_motor_torque(float(joint_torque_coef * self.torque_coef * real_action[i]))
def setup_keys_to_action(self):
self.keys_to_action = {(ord("w"),): 0, (): 1}
diff --git a/igibson/robots/husky.py b/igibson/robots/husky.py
deleted file mode 100644
index 937a9d130..000000000
--- a/igibson/robots/husky.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import os
-
-import numpy as np
-
-import igibson
-from igibson.robots.locomotion_robot import LocomotionRobot
-
-
-class Husky(LocomotionRobot):
- """
- Husky robot
- Reference: https://clearpathrobotics.com/, http://wiki.ros.org/Robots/Husky
- """
-
- def _create_discrete_action_space(self):
- raise ValueError("Husky does not support discrete actions!")
-
- @property
- def base_control_idx(self):
- return np.array([0, 1, 2, 3])
-
- @property
- def default_joint_pos(self):
- return np.zeros(self.n_joints)
-
- @property
- def model_file(self):
- return os.path.join(igibson.assets_path, "models/husky/husky.urdf")
diff --git a/igibson/robots/husky_robot.py b/igibson/robots/husky_robot.py
new file mode 100644
index 000000000..56e66c048
--- /dev/null
+++ b/igibson/robots/husky_robot.py
@@ -0,0 +1,77 @@
+import gym
+import numpy as np
+
+from igibson.robots.robot_locomotor import LocomotorRobot
+
+
+class Husky(LocomotorRobot):
+ """
+ Husky robot
+ Reference: https://clearpathrobotics.com/, http://wiki.ros.org/Robots/Husky
+ Uses joint torque control
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.torque = config.get("torque", 0.03)
+ LocomotorRobot.__init__(
+ self,
+ "husky/husky.urdf",
+ action_dim=4,
+ torque_coef=2.5,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", False),
+ control="torque",
+ )
+
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
+ self.action_high = self.torque * np.ones([self.action_dim])
+ self.action_low = -self.action_high
+
+ def set_up_discrete_action_space(self):
+ """
+ Set up discrete action space
+ """
+ self.action_list = [
+ [self.torque, self.torque, self.torque, self.torque],
+ [-self.torque, -self.torque, -self.torque, -self.torque],
+ [self.torque, -self.torque, self.torque, -self.torque],
+ [-self.torque, self.torque, -self.torque, self.torque],
+ [0, 0, 0, 0],
+ ]
+ self.action_space = gym.spaces.Discrete(len(self.action_list))
+ self.setup_keys_to_action()
+
+ def steering_cost(self, action):
+ """
+ Deprecated code for reward computation
+ """
+ if not self.is_discrete:
+ return 0
+ if action == 2 or action == 3:
+ return -0.1
+ else:
+ return 0
+
+ def alive_bonus(self, z, pitch):
+ """
+ Deprecated code for reward computation
+ """
+ top_xyz = self.parts["top_bumper_link"].get_position()
+ bottom_xyz = self.parts["base_link"].get_position()
+ alive = top_xyz[2] > bottom_xyz[2]
+ # 0.25 is central sphere rad, die if it scrapes the ground
+ return +1 if alive else -100
+
+ def setup_keys_to_action(self):
+ self.keys_to_action = {
+ (ord("w"),): 0, # forward
+ (ord("s"),): 1, # backward
+ (ord("d"),): 2, # turn right
+ (ord("a"),): 3, # turn left
+ (): 4,
+ }
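Usage-wise, these new-style robot classes take a plain config dict (normally parsed from the experiment YAML) rather than keyword arguments. A hypothetical instantiation, with made-up values:

```python
# Hypothetical config values for illustration; real experiments load these from YAML.
config = {"torque": 0.03, "robot_scale": 1.0, "is_discrete": True}
husky = Husky(config)
# With is_discrete=True the robot uses the 5-entry action_list defined above
# (forward, backward, turn right, turn left, stay still); exactly when the
# gym action space is built depends on LocomotorRobot, so treat this as a sketch.
```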
diff --git a/igibson/robots/jr2.py b/igibson/robots/jr2.py
deleted file mode 100644
index 382c5d42e..000000000
--- a/igibson/robots/jr2.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import os
-
-import numpy as np
-import pybullet as p
-
-import igibson
-from igibson.external.pybullet_tools.utils import set_joint_positions
-from igibson.robots.manipulation_robot import ManipulationRobot
-from igibson.robots.two_wheel_robot import TwoWheelRobot
-from igibson.utils.constants import SemanticClass
-
-
-class JR2(ManipulationRobot, TwoWheelRobot):
- """
- JR2 Kinova robot
- Reference: https://cvgl.stanford.edu/projects/jackrabbot/
- """
-
- def _create_discrete_action_space(self):
- # JR2 does not support discrete actions if we're controlling the arm as well
- raise ValueError("Full JR2 does not support discrete actions!")
-
- def _validate_configuration(self):
- # Make sure we're not using assisted grasping
- assert (
- self.assisted_grasping_mode is None
- ), "Cannot use assisted grasping modes for JR2 since gripper is disabled!"
-
- # Make sure we're using a null controller for the gripper
- assert (
- self.controller_config["gripper"]["name"] == "NullGripperController"
- ), "JR2 robot has its gripper disabled, so cannot use any controller other than NullGripperController!"
-
- # run super
- super()._validate_configuration()
-
- def reset(self):
- # In addition to normal reset, reset the joint configuration to be in default mode
- super().reset()
- joints = self.default_joint_pos
- set_joint_positions(self.get_body_id(), self.joint_ids, joints)
-
- @property
- def controller_order(self):
- # Ordered by general robot kinematics chain
- return ["base", "arm", "gripper"]
-
- @property
- def _default_controllers(self):
- # Always call super first
- controllers = super()._default_controllers
-
- # We use differential drive with joint controller for arm, since arm only has 5DOF
- controllers["base"] = "DifferentialDriveController"
- controllers["arm"] = "JointController"
- controllers["gripper"] = "NullGripperController"
-
- return controllers
-
- @property
- def default_joint_pos(self):
- return np.array([0.0, 0.0, -np.pi / 2, np.pi / 2, np.pi / 2, np.pi / 2, 0.0])
-
- @property
- def wheel_radius(self):
- return 0.2405
-
- @property
- def wheel_axle_length(self):
- return 0.5421
-
- @property
- def base_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
- """
- return np.array([1, 0])
-
- @property
- def arm_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to arm joints.
- """
- return np.array([2, 3, 4, 5, 6])
-
- @property
- def gripper_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to gripper joints.
- """
- return np.array([], dtype=np.int)
-
- @property
- def disabled_collision_pairs(self):
- return [
- ["base_chassis_joint", "pan_joint"],
- ["base_chassis_joint", "tilt_joint"],
- ["base_chassis_joint", "camera_joint"],
- ["jr2_fixed_body_joint", "pan_joint"],
- ["jr2_fixed_body_joint", "tilt_joint"],
- ["jr2_fixed_body_joint", "camera_joint"],
- ]
-
- @property
- def eef_link_name(self):
- return "m1n6s200_end_effector"
-
- @property
- def finger_link_names(self):
- return []
-
- @property
- def finger_joint_names(self):
- return []
-
- @property
- def model_file(self):
- return os.path.join(igibson.assets_path, "models/jr2_urdf/jr2_kinova.urdf")
diff --git a/igibson/robots/jr2_kinova_robot.py b/igibson/robots/jr2_kinova_robot.py
new file mode 100644
index 000000000..5d42d32e7
--- /dev/null
+++ b/igibson/robots/jr2_kinova_robot.py
@@ -0,0 +1,85 @@
+import gym
+import numpy as np
+import pybullet as p
+
+from igibson.external.pybullet_tools.utils import joints_from_names
+from igibson.robots.robot_locomotor import LocomotorRobot
+
+
+class JR2_Kinova(LocomotorRobot):
+ """
+ JR2 Kinova robot
+ Reference: https://cvgl.stanford.edu/projects/jackrabbot/
+ Uses joint velocity control
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.wheel_velocity = config.get("wheel_velocity", 0.3)
+ self.wheel_dim = 2
+ self.arm_velocity = config.get("arm_velocity", 1.0)
+ self.arm_dim = 5
+
+ LocomotorRobot.__init__(
+ self,
+ "jr2_urdf/jr2_kinova.urdf",
+ action_dim=self.wheel_dim + self.arm_dim,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", False),
+ control="velocity",
+ self_collision=True,
+ )
+
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_high = np.array([self.wheel_velocity] * self.wheel_dim + [self.arm_velocity] * self.arm_dim)
+ self.action_low = -self.action_high
+ self.action_space = gym.spaces.Box(shape=(self.wheel_dim + self.arm_dim,), low=-1.0, high=1.0, dtype=np.float32)
+
+ def set_up_discrete_action_space(self):
+ """
+ Set up discrete action space
+ """
+ assert False, "JR2_Kinova does not support discrete actions"
+
+ def get_end_effector_position(self):
+ """
+ Get end-effector position
+ """
+ return self.parts["m1n6s200_end_effector"].get_position()
+
+ def robot_specific_reset(self):
+ """
+ JR2 Kinova robot specific reset.
+ Initialize JR's arm to about the same height as its neck, facing forward
+ """
+ super(JR2_Kinova, self).robot_specific_reset()
+ self.ordered_joints[2].reset_joint_state(-np.pi / 2.0, 0.0)
+ self.ordered_joints[3].reset_joint_state(np.pi / 2.0, 0.0)
+ self.ordered_joints[4].reset_joint_state(np.pi / 2.0, 0.0)
+ self.ordered_joints[5].reset_joint_state(np.pi / 2.0, 0.0)
+ self.ordered_joints[6].reset_joint_state(0.0, 0.0)
+
+ def load(self):
+ """
+ Load the robot into pybullet. Filter out unnecessary self collision
+ due to modeling imperfection in the URDF
+ """
+ ids = super(JR2_Kinova, self).load()
+ robot_id = self.robot_ids[0]
+
+ disable_collision_names = [
+ ["base_chassis_joint", "pan_joint"],
+ ["base_chassis_joint", "tilt_joint"],
+ ["base_chassis_joint", "camera_joint"],
+ ["jr2_fixed_body_joint", "pan_joint"],
+ ["jr2_fixed_body_joint", "tilt_joint"],
+ ["jr2_fixed_body_joint", "camera_joint"],
+ ]
+ for names in disable_collision_names:
+ link_a, link_b = joints_from_names(robot_id, names)
+ p.setCollisionFilterPair(robot_id, robot_id, link_a, link_b, 0)
+
+ return ids
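The `load` override above relies on the fact that, in PyBullet, a link's index equals the index of the joint whose child it is, so joint indices returned by `joints_from_names` can be passed straight to the collision filter. A minimal sketch of the underlying call:

```python
import pybullet as p

def disable_self_collision(body_id, link_a, link_b):
    # Last argument 0 disables collision checks between two links of the same body.
    p.setCollisionFilterPair(body_id, body_id, link_a, link_b, 0)
```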
diff --git a/igibson/robots/jr2_robot.py b/igibson/robots/jr2_robot.py
new file mode 100644
index 000000000..f71a1e9ab
--- /dev/null
+++ b/igibson/robots/jr2_robot.py
@@ -0,0 +1,55 @@
+import gym
+import numpy as np
+
+from igibson.robots.robot_locomotor import LocomotorRobot
+
+
+class JR2(LocomotorRobot):
+ """
+ JR2 robot (no arm)
+ Reference: https://cvgl.stanford.edu/projects/jackrabbot/
+ Uses joint velocity control
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.velocity = config.get("velocity", 1.0)
+ LocomotorRobot.__init__(
+ self,
+ "jr2_urdf/jr2.urdf",
+ action_dim=4,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", True),
+ control="velocity",
+ )
+
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
+ self.action_high = self.velocity * np.ones([self.action_dim])
+ self.action_low = -self.action_high
+
+ def set_up_discrete_action_space(self):
+ """
+ Set up discrete action space
+ """
+ self.action_list = [
+ [self.velocity, self.velocity, 0, self.velocity],
+ [-self.velocity, -self.velocity, 0, -self.velocity],
+ [self.velocity, -self.velocity, -self.velocity, 0],
+ [-self.velocity, self.velocity, self.velocity, 0],
+ [0, 0, 0, 0],
+ ]
+ self.action_space = gym.spaces.Discrete(len(self.action_list))
+ self.setup_keys_to_action()
+
+ def setup_keys_to_action(self):
+ self.keys_to_action = {
+ (ord("w"),): 0, # forward
+ (ord("s"),): 1, # backward
+ (ord("d"),): 2, # turn right
+ (ord("a"),): 3, # turn left
+ (): 4,
+ }
diff --git a/igibson/robots/legacy/__init__.py b/igibson/robots/legacy/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/igibson/robots/locobot.py b/igibson/robots/locobot.py
deleted file mode 100644
index c86923e98..000000000
--- a/igibson/robots/locobot.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import os
-
-import numpy as np
-
-import igibson
-from igibson.robots.two_wheel_robot import TwoWheelRobot
-from igibson.utils.constants import SemanticClass
-
-
-class Locobot(TwoWheelRobot):
- """
- Locobot robot
- Reference: https://www.trossenrobotics.com/locobot-pyrobot-ros-rover.aspx
- """
-
- @property
- def wheel_radius(self):
- return 0.038
-
- @property
- def wheel_axle_length(self):
- return 0.230
-
- @property
- def base_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
- """
- return np.array([1, 0])
-
- @property
- def default_joint_pos(self):
- return np.zeros(self.n_joints)
-
- @property
- def model_file(self):
- return os.path.join(igibson.assets_path, "models/locobot/locobot.urdf")
diff --git a/igibson/robots/locobot_robot.py b/igibson/robots/locobot_robot.py
new file mode 100644
index 000000000..01a73fd61
--- /dev/null
+++ b/igibson/robots/locobot_robot.py
@@ -0,0 +1,54 @@
+import gym
+import numpy as np
+
+from igibson.robots.robot_locomotor import LocomotorRobot
+
+
+class Locobot(LocomotorRobot):
+ """
+ Locobot robot
+ Reference: https://www.trossenrobotics.com/locobot-pyrobot-ros-rover.aspx
+ Uses differential_drive / twist command control
+ """
+
+ def __init__(self, config):
+ self.config = config
+ # https://www.trossenrobotics.com/locobot-pyrobot-ros-rover.aspx
+ # Maximum translational velocity: 70 cm/s
+ # Maximum rotational velocity: 180 deg/s (>110 deg/s gyro performance will degrade)
+ self.linear_velocity = config.get("linear_velocity", 0.5)
+ self.angular_velocity = config.get("angular_velocity", np.pi / 2.0)
+ self.wheel_dim = 2
+ self.wheel_axle_half = 0.115 # half of the distance between the wheels
+ self.wheel_radius = 0.038 # radius of the wheels
+ LocomotorRobot.__init__(
+ self,
+ "locobot/locobot.urdf",
+ base_name="base_link",
+ action_dim=self.wheel_dim,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", False),
+ control="differential_drive",
+ )
+
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_high = np.zeros(self.wheel_dim)
+ self.action_high[0] = self.linear_velocity
+ self.action_high[1] = self.angular_velocity
+ self.action_low = -self.action_high
+ self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
+
+ def set_up_discrete_action_space(self):
+ """
+ Set up discrete action space
+ """
+ assert False, "Locobot does not support discrete actions"
+
+ def get_end_effector_position(self):
+ """
+ Get end-effector position
+ """
+ return self.parts["gripper_link"].get_position()
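Locobot is the one robot here using `control="differential_drive"`, which consumes a (linear, angular) twist rather than per-wheel commands. A sketch of the usual conversion to wheel angular velocities, assuming the geometry constants defined above:

```python
def twist_to_wheel_velocities(lin_vel, ang_vel, wheel_axle_half=0.115, wheel_radius=0.038):
    # Standard differential-drive kinematics: each wheel's linear speed is the
    # body's linear speed offset by the rotation about the axle midpoint.
    left = (lin_vel - ang_vel * wheel_axle_half) / wheel_radius
    right = (lin_vel + ang_vel * wheel_axle_half) / wheel_radius
    return left, right  # wheel joint velocities in rad/s
```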
diff --git a/igibson/robots/locomotion_robot.py b/igibson/robots/locomotion_robot.py
deleted file mode 100644
index 7acd37414..000000000
--- a/igibson/robots/locomotion_robot.py
+++ /dev/null
@@ -1,174 +0,0 @@
-from abc import abstractmethod
-
-import gym
-import numpy as np
-from transforms3d.euler import euler2quat
-from transforms3d.quaternions import qmult, quat2mat
-
-from igibson.controllers import LocomotionController
-from igibson.robots.robot_base import BaseRobot
-from igibson.utils.python_utils import assert_valid_key
-
-
-class LocomotionRobot(BaseRobot):
- """
- Robot that is is equipped with locomotive (navigational) capabilities.
- Provides common interface for a wide variety of robots.
-
- NOTE: controller_config should, at the minimum, contain:
- base: controller specifications for the controller to control this robot's base (locomotion).
- Should include:
-
- - name: Controller to create
- - relevant to the controller being created. Note that all values will have default
- values specified, but setting these individual kwargs will override them
-
- """
-
- def _validate_configuration(self):
- # We make sure that our base controller exists and is a locomotion controller
- assert (
- "base" in self._controllers
- ), "Controller 'base' must exist in controllers! Current controllers: {}".format(list(self._controllers.keys()))
- assert isinstance(
- self._controllers["base"], LocomotionController
- ), "Base controller must be a LocomotionController!"
-
- # run super
- super()._validate_configuration()
-
- def _get_proprioception_dict(self):
- dic = super()._get_proprioception_dict()
-
- # Add base info
- dic["base_qpos"] = self.joint_positions[self.base_control_idx]
- dic["base_qpos_sin"] = np.sin(self.joint_positions[self.base_control_idx])
- dic["base_qpos_cos"] = np.cos(self.joint_positions[self.base_control_idx])
- dic["base_qvel"] = self.joint_velocities[self.base_control_idx]
-
- return dic
-
- @property
- def default_proprio_obs(self):
- obs_keys = super().default_proprio_obs
- return obs_keys + ["base_qpos_sin", "base_qpos_cos", "robot_lin_vel", "robot_ang_vel"]
-
- @property
- def controller_order(self):
- # By default, only base is supported
- return ["base"]
-
- @property
- def _default_controllers(self):
- # Always call super first
- controllers = super()._default_controllers
-
- # For best generalizability use, joint controller as default
- controllers["base"] = "JointController"
-
- return controllers
-
- @property
- def _default_base_joint_controller_config(self):
- """
- :return: Dict[str, Any] Default base joint controller config to control this robot's base. Uses velocity
- control by default.
- """
- return {
- "name": "JointController",
- "control_freq": self.control_freq,
- "motor_type": "velocity",
- "control_limits": self.control_limits,
- "joint_idx": self.base_control_idx,
- "command_output_limits": "default",
- "use_delta_commands": False,
- "use_compliant_mode": True,
- }
-
- @property
- def _default_controller_config(self):
- # Always run super method first
- cfg = super()._default_controller_config
-
- # Add supported base controllers
- cfg["base"] = {
- self._default_base_joint_controller_config["name"]: self._default_base_joint_controller_config,
- }
-
- return cfg
-
- def move_by(self, delta):
- """
- Move robot base without physics simulation
-
- :param delta: Array[float], (x,y,z) cartesian delta base position
- """
- new_pos = np.array(delta) + self.get_position()
- self.robot_body.reset_position(new_pos)
-
- def move_forward(self, delta=0.05):
- """
- Move robot base forward without physics simulation
-
- :param delta: float, delta base position forward
- """
- self.move_by(quat2mat(self.base_link.get_orientation()).dot(np.array([delta, 0, 0])))
-
- def move_backward(self, delta=0.05):
- """
- Move robot base backward without physics simulation
-
- :param delta: float, delta base position backward
- """
- self.move_by(quat2mat(self.base_link.get_orientation()).dot(np.array([-delta, 0, 0])))
-
- def move_left(self, delta=0.05):
- """
- Move robot base left without physics simulation
-
- :param delta: float, delta base position left
- """
- self.move_by(quat2mat(self.base_link.get_orientation()).dot(np.array([0, -delta, 0])))
-
- def move_right(self, delta=0.05):
- """
- Move robot base right without physics simulation
-
- :param delta: float, delta base position right
- """
- self.move_by(quat2mat(self.base_link.get_orientation()).dot(np.array([0, delta, 0])))
-
- def turn_left(self, delta=0.03):
- """
- Rotate robot base left without physics simulation
-
- :param delta: float, delta angle to rotate the base left
- """
- quat = self.base_link.get_orientation()
- quat = qmult((euler2quat(-delta, 0, 0)), quat)
- self.base_link.set_orientation(quat)
-
- def turn_right(self, delta=0.03):
- """
- Rotate robot base right without physics simulation
-
- :param delta: delta angle to rotate the base right
- """
- quat = self.base_link.get_orientation()
- quat = qmult((euler2quat(delta, 0, 0)), quat)
- self.base_link.set_orientation(quat)
-
- def keep_still(self):
- """
- Keep the robot still. Apply zero velocity to all joints.
- """
- for n, j in enumerate(self.joints.values()):
- j.set_vel(0.0)
-
- @property
- @abstractmethod
- def base_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to base joints.
- """
- raise NotImplementedError
diff --git a/igibson/robots/manipulation_robot.py b/igibson/robots/manipulation_robot.py
deleted file mode 100644
index 105cf41e6..000000000
--- a/igibson/robots/manipulation_robot.py
+++ /dev/null
@@ -1,806 +0,0 @@
-from abc import abstractmethod
-from enum import IntEnum
-
-import gym
-import numpy as np
-import pybullet as p
-from transforms3d.euler import euler2quat
-from transforms3d.quaternions import qmult, quat2mat
-
-import igibson.utils.transform_utils as T
-from igibson.controllers import (
- JointController,
- ManipulationController,
- NullGripperController,
- ParallelJawGripperController,
-)
-from igibson.external.pybullet_tools.utils import (
- get_child_frame_pose,
- get_constraint_violation,
- get_joint_info,
- get_relative_pose,
- joints_from_names,
- set_coll_filter,
- set_joint_positions,
-)
-from igibson.robots.robot_base import BaseRobot
-from igibson.utils.constants import SemanticClass
-from igibson.utils.python_utils import assert_valid_key
-
-
-class IsGraspingState(IntEnum):
- TRUE = 1
- UNKNOWN = 0
- FALSE = -1
-
-
-AG_MODES = {
- None,
- "soft",
- "strict",
-}
-
-# Assisted grasping parameters
-ASSIST_FRACTION = 1.0
-ARTICULATED_ASSIST_FRACTION = 0.7
-MIN_ASSIST_FORCE = 0
-MAX_ASSIST_FORCE = 500
-ASSIST_FORCE = MIN_ASSIST_FORCE + (MAX_ASSIST_FORCE - MIN_ASSIST_FORCE) * ASSIST_FRACTION
-CONSTRAINT_VIOLATION_THRESHOLD = 0.1
-RELEASE_WINDOW = 1 / 30.0 # release window in seconds
-
-# is_grasping heuristics parameters
-POS_TOLERANCE = 0.002 # arbitrary heuristic
-VEL_TOLERANCE = 0.01 # arbitrary heuristic
-
-
-class ManipulationRobot(BaseRobot):
- """
- Robot that is is equipped with grasping (manipulation) capabilities.
- Provides common interface for a wide variety of robots.
-
- NOTE: controller_config should, at the minimum, contain:
- arm: controller specifications for the controller to control this robot's arm (manipulation).
- Should include:
-
- - name: Controller to create
- - relevant to the controller being created. Note that all values will have default
- values specified, but setting these individual kwargs will override them
- """
-
- def __init__(
- self,
- control_freq=None,
- action_type="continuous",
- action_normalize=True,
- proprio_obs="default",
- controller_config=None,
- base_name=None,
- scale=1.0,
- self_collision=False,
- class_id=SemanticClass.ROBOTS,
- rendering_params=None,
- assisted_grasping_mode=None,
- ):
- """
- :param control_freq: float, control frequency (in Hz) at which to control the robot. If set to be None,
- simulator.import_robot will automatically set the control frequency to be 1 / render_timestep by default.
- :param action_type: str, one of {discrete, continuous} - what type of action space to use
- :param action_normalize: bool, whether to normalize inputted actions. This will override any default values
- specified by this class.
- :param proprio_obs: str or tuple of str, proprioception observation key(s) to use for generating proprioceptive
- observations. If str, should be exactly "default" -- this results in the default proprioception observations
- being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict for valid key choices
- :param controller_config: None or Dict[str, ...], nested dictionary mapping controller name(s) to specific controller
- configurations for this robot. This will override any default values specified by this class.
- :param base_name: None or str, robot link name that will represent the entire robot's frame of reference. If not None,
- this should correspond to one of the link names found in this robot's corresponding URDF / MJCF file.
- None defaults to the first link name used in @model_file
- :param scale: int, scaling factor for model (default is 1)
- :param self_collision: bool, whether to enable self collision
- :param class_id: SemanticClass, semantic class this robot belongs to. Default is SemanticClass.ROBOTS.
- :param rendering_params: None or Dict[str, Any], If not None, should be keyword-mapped rendering options to set.
- See DEFAULT_RENDERING_PARAMS for the values passed by default.
- :param assisted_grasping_mode: None or str, One of {None, "soft", "strict"}. If None, no assisted grasping
- will be used. If "soft", will magnetize any object touching the gripper's fingers. If "strict" will require
- the object to be within the gripper bounding box before assisting.
- """
- # Store relevant internal vars
- assert_valid_key(key=assisted_grasping_mode, valid_keys=AG_MODES, name="assisted_grasping_mode")
- self.assisted_grasping_mode = assisted_grasping_mode
-
- # Initialize other variables used for assistive grasping
- self._ag_data = None
- self._ag_freeze_joint_pos = {} # Frozen positions for keeping fingers held still
- self._ag_obj_in_hand = None
- self._ag_obj_cid = None
- self._ag_obj_cid_params = {}
- self._ag_freeze_gripper = False
- self._ag_release_counter = None
-
- # Call super() method
- super().__init__(
- control_freq=control_freq,
- action_type=action_type,
- action_normalize=action_normalize,
- proprio_obs=proprio_obs,
- controller_config=controller_config,
- base_name=base_name,
- scale=scale,
- class_id=class_id,
- self_collision=self_collision,
- rendering_params=rendering_params,
- )
-
- def _validate_configuration(self):
- # We make sure that our arm controller exists and is a manipulation controller
- assert "arm" in self._controllers, "Controller 'arm' must exist in controllers! Current controllers: {}".format(
- list(self._controllers.keys())
- )
- assert isinstance(
- self._controllers["arm"], ManipulationController
- ), "Arm controller must be a ManipulationController!"
-
- # We make sure that our gripper controller exists and is a manipulation controller
- assert (
- "gripper" in self._controllers
- ), "Controller 'gripper' must exist in controllers! Current controllers: {}".format(
- list(self._controllers.keys())
- )
- assert isinstance(
- self._controllers["arm"], ManipulationController
- ), "Arm controller must be a ManipulationController!"
-
- # run super
- super()._validate_configuration()
-
- def is_grasping(self, candidate_obj=None):
- """
- Returns True if the robot is grasping the target option @candidate_obj or any object if @candidate_obj is None.
-
- :param candidate_obj: Object or None, object to check if this robot is currently grasping. If None, then
- will be a general (object-agnostic) check for grasping.
- Note: if self.assisted_grasping_mode is None, then @candidate_obj will be ignored completely
-
- :return Array[int]: For each manipulator appendage, returns IsGraspingState.TRUE if it is grasping (potentially
- @candidate_obj if specified), IsGraspingState.FALSE if it is not grasping, and IsGraspingState.UNKNOWN if unknown.
- """
- if self.assisted_grasping_mode is not None:
- is_grasping_obj = (
- self._ag_obj_in_hand is not None if candidate_obj is None else self._ag_obj_in_hand == candidate_obj
- )
- is_grasping = (
- IsGraspingState.TRUE if is_grasping_obj and self._ag_release_counter else IsGraspingState.FALSE
- )
- else:
- gripper_controller = self._controllers["gripper"]
-
- # NullGripperController cannot grasp anything
- if isinstance(gripper_controller, NullGripperController):
- is_grasping = IsGraspingState.FALSE
-
- # JointController does not have any good heuristics to determine is_grasping
- elif isinstance(gripper_controller, JointController):
- is_grasping = IsGraspingState.UNKNOWN
-
- elif isinstance(gripper_controller, ParallelJawGripperController):
- # Independent mode of ParallelJawGripperController does not have any good heuristics to determine is_grasping
- if gripper_controller.mode == "independent":
- is_grasping = IsGraspingState.UNKNOWN
-
- # No control has been issued before
- elif gripper_controller.control is None:
- is_grasping = IsGraspingState.FALSE
-
- else:
- assert np.all(
- gripper_controller.control == gripper_controller.control[0]
- ), "ParallelJawGripperController has different values in the command for non-independent mode"
-
- assert POS_TOLERANCE > gripper_controller.limit_tolerance, (
- "Joint position tolerance for is_grasping heuristics checking is smaller than or equal to the "
- "gripper controller's tolerance of zero-ing out velocities, which makes the heuristics invalid."
- )
-
- finger_pos = self.joint_positions[self.gripper_control_idx]
-
- # For joint position control, if the desired positions are the same as the current positions, is_grasping unknown
- if (
- gripper_controller.motor_type == "position"
- and np.mean(np.abs(finger_pos - gripper_controller.control)) < POS_TOLERANCE
- ):
- is_grasping = IsGraspingState.UNKNOWN
-
- # For joint velocity / torque control, if the desired velocities / torques are zeros, is_grasping unknown
- elif (
- gripper_controller.motor_type in {"velocity", "torque"}
- and np.mean(np.abs(gripper_controller.control)) < VEL_TOLERANCE
- ):
- is_grasping = IsGraspingState.UNKNOWN
-
- # Otherwise, the last control signal intends to "move" the gripper
- else:
- finger_pos = self.joint_positions[self.gripper_control_idx]
- finger_vel = self.joint_velocities[self.gripper_control_idx]
- min_pos = self.joint_lower_limits[self.gripper_control_idx]
- max_pos = self.joint_upper_limits[self.gripper_control_idx]
-
- # Make sure we don't have any invalid values (i.e.: fingers should be within the limits)
- assert np.all(
- (min_pos <= finger_pos) * (finger_pos <= max_pos)
- ), "Got invalid finger joint positions when checking for grasp!"
-
- # Check distance from both ends of the joint limits
- dist_from_lower_limit = finger_pos - min_pos
- dist_from_upper_limit = max_pos - finger_pos
-
- # If the joint positions are not near the joint limits with some tolerance (POS_TOLERANCE)
- valid_grasp_pos = (
- np.mean(dist_from_lower_limit) > POS_TOLERANCE
- and np.mean(dist_from_upper_limit) > POS_TOLERANCE
- )
-
- # And the joint velocities are close to zero with some tolerance (VEL_TOLERANCE)
- valid_grasp_vel = np.all(np.abs(finger_vel) < VEL_TOLERANCE)
-
- # Then the gripper is grasping something, which stops the gripper from reaching its desired state
- is_grasping = is_grasping = (
- IsGraspingState.TRUE if valid_grasp_pos and valid_grasp_vel else IsGraspingState.FALSE
- )
-
- else:
- # Add more cases once we have more gripper controllers available
- raise Exception("Unexpected gripper controller type: {}".format(type(gripper_controller)))
-
- # Return as a numerical array
- return np.array([is_grasping])
-
- def apply_action(self, action):
- # First run assisted grasping
- if self.assisted_grasping_mode is not None:
- self._handle_assisted_grasping(action=action)
-
- # Run super method as normal
- super().apply_action(action)
-
- # Potentially freeze gripper joints
- if self._ag_freeze_gripper:
- self._freeze_gripper()
-
- def release_grasp(self):
- """
- Magic action to release this robot's grasp on an object
- """
- p.removeConstraint(self._ag_obj_cid)
- self._ag_data = None
- self._ag_obj_cid = None
- self._ag_obj_cid_params = {}
- self._ag_freeze_gripper = False
- self._ag_release_counter = 0
-
- def get_control_dict(self):
- # In addition to super method, add in EEF states
- dic = super().get_control_dict()
- dic["task_pos_relative"] = self.get_relative_eef_position()
- dic["task_quat_relative"] = self.get_relative_eef_orientation()
-
- return dic
-
- def _get_proprioception_dict(self):
- dic = super()._get_proprioception_dict()
-
- # Add arm info
- dic["arm_qpos"] = self.joint_positions[self.arm_control_idx]
- dic["arm_qpos_sin"] = np.sin(self.joint_positions[self.arm_control_idx])
- dic["arm_qpos_cos"] = np.cos(self.joint_positions[self.arm_control_idx])
- dic["arm_qvel"] = self.joint_velocities[self.arm_control_idx]
-
- # Add eef and grasping info
- dic["eef_pos_global"] = self.get_eef_position()
- dic["eef_quat_global"] = self.get_eef_orientation()
- dic["eef_pos"] = self.get_relative_eef_position()
- dic["eef_quat"] = self.get_relative_eef_orientation()
- dic["grasp"] = self.is_grasping()
- dic["gripper_qpos"] = self.joint_positions[self.finger_joint_ids]
- dic["gripper_qvel"] = self.joint_velocities[self.finger_joint_ids]
-
- return dic
-
- @property
- def default_proprio_obs(self):
- obs_keys = super().default_proprio_obs
- return obs_keys + ["arm_qpos_sin", "arm_qpos_cos", "eef_pos", "eef_quat", "gripper_qpos", "grasp"]
-
- @property
- def controller_order(self):
- # Assumes we have an arm and a gripper
- return ["arm", "gripper"]
-
- @property
- def _default_controllers(self):
- # Always call super first
- controllers = super()._default_controllers
-
- # For best generalizability use, joint controller as default
- controllers["arm"] = "JointController"
- controllers["gripper"] = "JointController"
-
- return controllers
-
- @property
- @abstractmethod
- def eef_link_name(self):
- """
- :return str: Name of the EEF link, should correspond to specific link name in this robot's underlying model file
- """
- raise NotImplementedError
-
- @property
- @abstractmethod
- def finger_link_names(self):
- """
- :return list: Array of link names corresponding to this robot's fingers
- """
- raise NotImplementedError
-
- @property
- @abstractmethod
- def finger_joint_names(self):
- """
- :return list: Array of joint names corresponding to this robot's fingers
- """
- raise NotImplementedError
-
- @property
- @abstractmethod
- def arm_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to arm joints.
- """
- raise NotImplementedError
-
- @property
- @abstractmethod
- def gripper_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to gripper joints.
- """
- raise NotImplementedError
-
- @property
- def eef_link_id(self):
- """
- :return int: Link ID corresponding to the eef link
- """
- return self._links[self.eef_link_name].link_id
-
- @property
- def finger_link_ids(self):
- """
- :return list: Link IDs corresponding to the eef fingers
- """
- return [self._links[link].link_id for link in self.finger_link_names]
-
- @property
- def finger_joint_ids(self):
- """
- :return list: Joint IDs corresponding to the eef fingers
- """
- return [self._joints[joint].joint_id for joint in self.finger_joint_names]
-
- @property
- def gripper_link_to_grasp_point(self):
- """
- :return Array[float]: (dx,dy,dz) relative distance from the gripper link frame to the expected center
- of the robot's grasping point. Unique to each robot embodiment.
- """
- raise NotImplementedError
-
- def get_eef_position(self):
- """
- :return Array[float]: (x,y,z) global end-effector Cartesian position for this robot's end-effector
- """
- return self._links[self.eef_link_name].get_position()
-
- def get_eef_orientation(self):
- """
- :return Array[float]: (x,y,z,w) global quaternion orientation for this robot's end-effector
- """
- return self._links[self.eef_link_name].get_orientation()
-
- def get_relative_eef_pose(self, mat=True):
- """
- :param mat: bool, whether to return pose in matrix form (mat=True) or (pos, quat) tuple (mat=False)
-
- :return Tuple[Array[float], Array[float]] or Array[Array[float]]: End-effector pose, either in 4x4 homogeneous
- matrix form (if @mat=True) or (pos, quat) tuple (if @mat=False)
- """
- pose = get_relative_pose(body=self.get_body_id(), link1=self.eef_link_id)
- return T.pose2mat(pose) if mat else pose
-
- def get_relative_eef_position(self):
- """
- :return Array[float]: (x,y,z) Cartesian position of end-effector relative to robot base frame
- """
- return get_relative_pose(body=self.get_body_id(), link1=self.eef_link_id)[0]
-
- def get_relative_eef_orientation(self):
- """
- :return Array[float]: (x,y,z,z) quaternion orientation of end-effector relative to robot base frame
- """
- return get_relative_pose(body=self.get_body_id(), link1=self.eef_link_id)[1]
-
- def _calculate_in_hand_object(self):
- """
- Calculates which object to assisted-grasp. Returns an (object_id, link_id) tuple or None
- if no valid AG-enabled object can be found.
-
- :return None or Tuple[int, int]: If a valid assisted-grasp object is found, returns the corresponding
- (object_id, link_id) corresponding to the contact point of that object. Otherwise, returns None
- """
- # Step 1: Find all objects in contact with all finger joints
- contact_groups = [
- p.getContactPoints(bodyA=self.get_body_id(), linkIndexA=link_id) for link_id in self.finger_link_ids
- ]
-
- # Step 2: Process contacts and get intersection over all sets of contacts
- contact_dict = {}
- candidates_set = set()
- for contact_group in contact_groups:
- contact_set = set()
- for contact in contact_group:
- c_name = contact[2]
- contact_set.add(c_name)
- if c_name not in contact_dict:
- contact_dict[c_name] = []
- contact_dict[c_name].append(
- {
- "contact_position": contact[5],
- "target_link": contact[4],
- }
- )
- candidates_set = candidates_set.intersection(contact_set)
-
- # Immediately return if there are no valid candidates
- if len(candidates_set) == 0:
- return None
-
- # Step 3: If we're using strict assisted grasping, filter candidates by checking to make sure target is
- # inside bounding box
- if self.assisted_grasping_mode == "strict":
- contact_dict = self._filter_assisted_grasp_candidates(contact_dict=contact_dict, candidates=candidates_set)
-
- # Step 4: Find the closest object to the gripper center among these "inside" objects
- gripper_state = p.getLinkState(self.get_body_id(), self.eef_link_id)
- # Compute gripper bounding box
- gripper_center_pos, _ = p.multiplyTransforms(*gripper_state, self.gripper_link_to_grasp_point, [0, 0, 0, 1])
-
- candidate_data = []
- for candidate in candidates_set:
- for contact_point_data in contact_dict[candidate]:
- dist = np.linalg.norm(np.array(contact_point_data["contact_position"]) - np.array(gripper_center_pos))
- candidate_data.append((candidate, contact_point_data["target_link"], dist))
-
- candidate_data = sorted(candidate_data, key=lambda x: x[2])
- if len(candidate_data) > 0:
- ag_bid, ag_link, _ = candidate_data[0]
- else:
- return None
-
- # Return None if any of the following edge cases are activated
- if not self.simulator.can_assisted_grasp(ag_bid, ag_link) or (self.get_body_id() == ag_bid):
- return None
-
- return ag_bid, ag_link
-
- def _filter_assisted_grasp_candidates(self, contact_dict, candidates):
- """
- Check all contact candidates and filter out any that are not within the bounding box of the gripper.
- Should be implemented by subclass.
-
- :param contact_dict: Dict[str, Array[Dict[str, float]]], Dictionary containing relevant per-contact point
- information
- :param candidates: Set[str], Valid candidates to check for bounding box within gripper
-
- :return: Dict[str, Array[Dict[str, float]]], Filtered dictionary containing relevant per-contact point
- information
- """
- raise NotImplementedError
-
- def _handle_release_window(self):
- """
- Handles releasing an object
- """
- self._ag_release_counter += 1
- time_since_release = self._ag_release_counter * self.simulator.render_timestep
- if time_since_release >= RELEASE_WINDOW:
- set_coll_filter(
- target_id=self._ag_obj_in_hand,
- body_id=self.get_body_id(),
- body_links=self.finger_joint_ids,
- enable=True,
- )
- self._ag_obj_in_hand = None
- self._ag_release_counter = None
-
- def _freeze_gripper(self):
- """
- Freezes gripper finger joints - used in assisted grasping.
- """
- for joint_index, j_val in self._ag_freeze_joint_pos.items():
- p.resetJointState(self.get_body_id(), joint_index, targetValue=j_val, targetVelocity=0.0)
-
- @property
- def _default_arm_joint_controller_config(self):
- """
- :return: Dict[str, Any] Default arm joint controller config to control this robot's arm. Uses velocity
- control by default.
- """
- return {
- "name": "JointController",
- "control_freq": self.control_freq,
- "motor_type": "velocity",
- "control_limits": self.control_limits,
- "joint_idx": self.arm_control_idx,
- "command_output_limits": "default",
- "use_delta_commands": False,
- "use_compliant_mode": True,
- }
-
- @property
- def _default_arm_ik_controller_config(self):
- """
- :return: Dict[str, Any] Default controller config for an Inverse kinematics controller to control this robot's
- arm
- """
- return {
- "name": "InverseKinematicsController",
- "base_body_id": self.get_body_id(),
- "task_link_id": self.eef_link_id,
- "control_freq": self.control_freq,
- "default_joint_pos": self.default_joint_pos,
- "joint_damping": self.joint_damping,
- "control_limits": self.control_limits,
- "joint_idx": self.arm_control_idx,
- "command_output_limits": (
- np.array([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
- np.array([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
- ),
- "kv": 2.0,
- "mode": "pose_delta_ori",
- "smoothing_filter_size": 2,
- "workspace_pose_limiter": None,
- }
-
- @property
- def _default_gripper_parallel_jaw_controller_config(self):
- """
- :return: Dict[str, Any] Default controller config to control this robot's parallel jaw gripper. Assumes
- robot gripper idx has exactly two elements
- """
- return {
- "name": "ParallelJawGripperController",
- "control_freq": self.control_freq,
- "motor_type": "position",
- "control_limits": self.control_limits,
- "joint_idx": self.gripper_control_idx,
- "command_output_limits": "default",
- "mode": "binary",
- "limit_tolerance": 0.001,
- }
-
- @property
- def _default_gripper_joint_controller_config(self):
- """
- :return: Dict[str, Any] Default gripper joint controller config to control this robot's gripper
- """
- return {
- "name": "JointController",
- "control_freq": self.control_freq,
- "motor_type": "velocity",
- "control_limits": self.control_limits,
- "joint_idx": self.gripper_control_idx,
- "command_output_limits": "default",
- "use_delta_commands": False,
- "use_compliant_mode": True,
- }
-
- @property
- def _default_gripper_null_controller_config(self):
- """
- :return: Dict[str, Any] Default gripper null controller config to control this robot's (non-prehensile) gripper
- i.e. dummy controller
- """
- return {
- "name": "NullGripperController",
- "control_freq": self.control_freq,
- "control_limits": self.control_limits,
- }
-
- @property
- def _default_controller_config(self):
- # Always run super method first
- cfg = super()._default_controller_config
-
- # Add arm and gripper defaults
- cfg["arm"] = {
- self._default_arm_ik_controller_config["name"]: self._default_arm_ik_controller_config,
- self._default_arm_joint_controller_config["name"]: self._default_arm_joint_controller_config,
- }
- cfg["gripper"] = {
- self._default_gripper_parallel_jaw_controller_config[
- "name"
- ]: self._default_gripper_parallel_jaw_controller_config,
- self._default_gripper_joint_controller_config["name"]: self._default_gripper_joint_controller_config,
- self._default_gripper_null_controller_config["name"]: self._default_gripper_null_controller_config,
- }
-
- return cfg
-
- def _establish_grasp(self, ag_data):
- """
- Establishes an ag-assisted grasp, if enabled.
-
- :param ag_data: Tuple[int, int], assisted-grasp object body ID and link ID
- """
- ag_bid, ag_link = ag_data
-
- child_frame_pos, child_frame_orn = get_child_frame_pose(
- parent_bid=self.get_body_id(), parent_link=self.eef_link_id, child_bid=ag_bid, child_link=ag_link
- )
-
- # If we grab a child link of a URDF, create a p2p joint
- if ag_link == -1:
- joint_type = p.JOINT_FIXED
- else:
- joint_type = p.JOINT_POINT2POINT
-
- self.obj_cid = p.createConstraint(
- parentBodyUniqueId=self.get_body_id(),
- parentLinkIndex=self.eef_link_id,
- childBodyUniqueId=ag_bid,
- childLinkIndex=ag_link,
- jointType=joint_type,
- jointAxis=(0, 0, 0),
- parentFramePosition=(0, 0, 0),
- childFramePosition=child_frame_pos,
- childFrameOrientation=child_frame_orn,
- )
- # Modify max force based on user-determined assist parameters
- if ag_link == -1:
- max_force = ASSIST_FORCE
- else:
- max_force = ASSIST_FORCE * ARTICULATED_ASSIST_FRACTION
- p.changeConstraint(self._ag_obj_cid, maxForce=max_force)
-
- self._ag_obj_cid_params = {
- "childBodyUniqueId": ag_bid,
- "childLinkIndex": ag_link,
- "jointType": joint_type,
- "maxForce": max_force,
- }
- self._ag_obj_in_hand = ag_bid
- self._ag_freeze_gripper = True
- # Disable collisions while picking things up
- set_coll_filter(target_id=ag_bid, body_id=self.get_body_id(), body_links=self.finger_joint_ids, enable=False)
- for joint_index in self.gripper_finger_joint_ids:
- j_val = p.getJointState(self.get_body_id(), joint_index)[0]
- self._ag_freeze_joint_pos[joint_index] = j_val
-
- def _handle_assisted_grasping(self, action):
- """
- Handles assisted grasping.
-
- :param action: Array[action], gripper action to apply. >= 0 is release (open), < 0 is grasp (close).
- """
- # Make sure gripper action dimension is only 1
- assert (
- self._controllers["gripper"].mode == "binary"
- ), "Gripper controller command dim must be 1 to use assisted grasping, got: {}".format(
- self._controllers["gripper"].command_dim
- )
-
- applying_grasp = action[self.controller_action_idx["gripper"]] < 0.0
- releasing_grasp = action[self.controller_action_idx["gripper"]] >= 0.0
-
- # Execute gradual release of object
- if self._ag_obj_in_hand:
- if self._ag_release_counter is not None:
- self._handle_release_window()
- else:
- constraint_violated = get_constraint_violation(self._ag_obj_cid) > CONSTRAINT_VIOLATION_THRESHOLD
- if constraint_violated or releasing_grasp:
- self._release_grasp()
-
- elif applying_grasp:
- self._ag_data = self._calculate_in_hand_object()
- if self._ag_data:
- self._establish_grasp(self._ag_data)
-
- def dump_state(self):
- if self.assisted_grasping_mode is None:
- return
-
- # Recompute child frame pose because it could have changed since the
- # constraint has been created
- if self._ag_obj_cid is not None:
- ag_bid = self._ag_obj_cid_params["childBodyUniqueId"]
- ag_link = self._ag_obj_cid_params["childLinkIndex"]
- child_frame_pos, child_frame_orn = get_child_frame_pose(
- parent_bid=self.get_body_id(), parent_link=self.eef_link_id, child_bid=ag_bid, child_link=ag_link
- )
- self._ag_obj_cid_params.update(
- {
- "childFramePosition": child_frame_pos,
- "childFrameOrientation": child_frame_orn,
- }
- )
-
- return {
- "_ag_obj_in_hand": self._ag_obj_in_hand,
- "_ag_release_counter": self._ag_release_counter,
- "_ag_freeze_gripper": self._ag_freeze_gripper,
- "_ag_freeze_joint_pos": self._ag_freeze_joint_pos,
- "_ag_obj_cid": self._ag_obj_cid,
- "_ag_obj_cid_params": self._ag_obj_cid_params,
- }
-
- def load_state(self, dump):
- if self.assisted_grasping_mode is None:
- return
-
- # Cancel the previous AG if exists
- if self._ag_obj_cid is not None:
- p.removeConstraint(self._ag_obj_cid)
-
- if self._ag_obj_in_hand is not None:
- set_coll_filter(
- target_id=self._ag_obj_in_hand,
- body_id=self.get_body_id(),
- body_links=self.finger_joint_ids,
- enable=True,
- )
-
- # For backwards compatibility, if the newest version of the string doesn't exist, we try to use the old string
- _ag_obj_in_hand_str = "_ag_obj_in_hand" if "_ag_obj_in_hand" in dump else "object_in_hand"
- _ag_release_counter_str = "_ag_release_counter" if "_ag_release_counter" in dump else "release_counter"
- _ag_freeze_gripper_str = "_ag_freeze_gripper" if "_ag_freeze_gripper" in dump else "should_freeze_joints"
- _ag_freeze_joint_pos_str = "_ag_freeze_joint_pos" if "_ag_freeze_joint_pos" in dump else "freeze_vals"
- _ag_obj_cid_str = "_ag_obj_cid" if "_ag_obj_cid" in dump else "obj_cid"
- _ag_obj_cid_params_str = "_ag_obj_cid_params" if "_ag_obj_cid_params" in dump else "obj_cid_params"
-
- self._ag_obj_in_hand = dump[_ag_obj_in_hand_str]
- self._ag_release_counter = dump[_ag_release_counter_str]
- self._ag_freeze_gripper = dump[_ag_freeze_gripper_str]
- self._ag_freeze_joint_pos = {int(key): val for key, val in dump[_ag_freeze_joint_pos_str].items()}
- self._ag_obj_cid = dump[_ag_obj_cid_str]
- self._ag_obj_cid_params = dump[_ag_obj_cid_params_str]
- if self._ag_obj_cid is not None:
- self._ag_obj_cid = p.createConstraint(
- parentBodyUniqueId=self.get_body_id(),
- parentLinkIndex=self.eef_link_id,
- childBodyUniqueId=dump[_ag_obj_cid_params_str]["childBodyUniqueId"],
- childLinkIndex=dump[_ag_obj_cid_params_str]["childLinkIndex"],
- jointType=dump[_ag_obj_cid_params_str]["jointType"],
- jointAxis=(0, 0, 0),
- parentFramePosition=(0, 0, 0),
- childFramePosition=dump[_ag_obj_cid_params_str]["childFramePosition"],
- childFrameOrientation=dump[_ag_obj_cid_params_str]["childFrameOrientation"],
- )
- p.changeConstraint(self._ag_obj_cid, maxForce=dump[_ag_obj_cid_params_str]["maxForce"])
-
- if self._ag_obj_in_hand is not None:
- set_coll_filter(
- target_id=self._ag_obj_in_hand,
- body_id=self.get_body_id(),
- body_links=self.finger_joint_ids,
- enable=False,
- )
-
- def can_toggle(self, toggle_position, toggle_distance_threshold):
- for joint_id in self.finger_joint_ids:
- finger_link_state = p.getLinkState(self.get_body_id(), joint_id)
- link_pos = finger_link_state[0]
- if np.linalg.norm(np.array(link_pos) - np.array(toggle_position)) < toggle_distance_threshold:
- return True
- return False
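For reference, the grasp heuristic being deleted above boils down to: if the last gripper command tried to move the fingers, yet they are stalled away from both joint limits, assume an object is held. Condensed into a standalone sketch using the same tolerances as the removed code:

```python
import numpy as np

POS_TOLERANCE = 0.002  # arbitrary heuristics carried over from the removed code
VEL_TOLERANCE = 0.01

def heuristic_is_grasping(finger_pos, finger_vel, min_pos, max_pos):
    away_from_limits = (
        np.mean(finger_pos - min_pos) > POS_TOLERANCE
        and np.mean(max_pos - finger_pos) > POS_TOLERANCE
    )
    stalled = np.all(np.abs(finger_vel) < VEL_TOLERANCE)
    return bool(away_from_limits and stalled)
```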
diff --git a/igibson/robots/legacy/minitaur_robot.py b/igibson/robots/minitaur_robot.py
similarity index 98%
rename from igibson/robots/legacy/minitaur_robot.py
rename to igibson/robots/minitaur_robot.py
index da666be44..e7796c16f 100644
--- a/igibson/robots/legacy/minitaur_robot.py
+++ b/igibson/robots/minitaur_robot.py
@@ -9,12 +9,12 @@
import pybullet as p
from igibson.physics import motor
-from igibson.robots.locomotion_robot import LocomotionRobot
+from igibson.robots.robot_locomotor import LocomotorRobot
tracking_camera = {"yaw": 20, "z_offset": 0.3, "distance": 2, "pitch": -20}
-class MinitaurBase(LocomotionRobot):
+class MinitaurBase(LocomotorRobot):
"""
Minitaur robot
Reference: https://www.ghostrobotics.io/
@@ -66,7 +66,7 @@ class MinitaurBase(LocomotionRobot):
"""The minitaur class that simulates a quadruped robot from Ghost Robotics.
"""
- def __init__(self, config, env=None, pd_control_enabled=True, accurate_motor_model_enabled=True, **kwargs):
+ def __init__(self, config, env=None, pd_control_enabled=True, accurate_motor_model_enabled=True):
"""Constructs a minitaur and reset it to the initial states.
Properties:
@@ -93,7 +93,7 @@ def __init__(self, config, env=None, pd_control_enabled=True, accurate_motor_mod
self.robot_name = "base_chassis_link"
scale = config["robot_scale"] if "robot_scale" in config.keys() else self.default_scale
- LocomotionRobot.__init__(
+ LocomotorRobot.__init__(
self,
"quadruped/minitaur.urdf",
self.robot_name,
@@ -105,7 +105,6 @@ def __init__(self, config, env=None, pd_control_enabled=True, accurate_motor_mod
target_pos=config["target_pos"],
resolution=config["resolution"],
env=env,
- **kwargs
)
self.r_f = 0.1
@@ -170,13 +169,13 @@ def _BuildJointNameToIdDict(self):
def _BuildMotorIdList(self):
self._motor_id_list = [self.joint_name_to_id[motor_name] for motor_name in self.MOTOR_NAMES]
- def reset(self, reload_urdf=True):
+ def robot_specific_reset(self, reload_urdf=True):
"""Reset the minitaur to its initial states.
:param reload_urdf: whether to reload the urdf file. If not, reset() just place the minitaur back to its starting position.
"""
if self.minitaur is None:
- self.minitaur = self.get_body_id()
+ self.minitaur = self.robot_ids[0]
if self.joint_name_to_id is None:
self._BuildJointNameToIdDict()
diff --git a/igibson/robots/legacy/quadrotor_robot.py b/igibson/robots/quadrotor_robot.py
similarity index 87%
rename from igibson/robots/legacy/quadrotor_robot.py
rename to igibson/robots/quadrotor_robot.py
index 3e3acb65a..b48316ad0 100644
--- a/igibson/robots/legacy/quadrotor_robot.py
+++ b/igibson/robots/quadrotor_robot.py
@@ -2,20 +2,20 @@
import numpy as np
import pybullet as p
-from igibson.robots.robot_locomotor import LocomotionRobot
+from igibson.robots.robot_locomotor import LocomotorRobot
-class Quadrotor(LocomotionRobot):
+class Quadrotor(LocomotorRobot):
"""
Quadrotor robot
Reference: https://repository.upenn.edu/cgi/viewcontent.cgi?referer=https://www.google.com/&httpsredir=1&article=1705&context=edissertations
Uses joint torque control
"""
- def __init__(self, config, **kwargs):
+ def __init__(self, config):
self.config = config
self.torque = config.get("torque", 0.02)
- LocomotionRobot.__init__(
+ LocomotorRobot.__init__(
self,
"quadrotor/quadrotor.urdf",
action_dim=6,
@@ -23,7 +23,6 @@ def __init__(self, config, **kwargs):
scale=config.get("robot_scale", 1.0),
is_discrete=config.get("is_discrete", False),
control="torque",
- **kwargs
)
def set_up_continuous_action_space(self):
@@ -56,7 +55,7 @@ def apply_action(self, action):
"""
real_action = self.policy_action_to_robot_action(action)
p.setGravity(0, 0, 0)
- p.resetBaseVelocity(self.get_body_id(), real_action[:3], real_action[3:])
+ p.resetBaseVelocity(self.robot_ids[0], real_action[:3], real_action[3:])
def setup_keys_to_action(self):
self.keys_to_action = {
diff --git a/igibson/robots/robot_base.py b/igibson/robots/robot_base.py
index b524543d3..b3cd32f71 100644
--- a/igibson/robots/robot_base.py
+++ b/igibson/robots/robot_base.py
@@ -1,1131 +1,306 @@
import logging
import os
-from abc import ABCMeta, abstractmethod
-from collections import OrderedDict
-from copy import deepcopy
-import gym
import numpy as np
import pybullet as p
-from future.utils import with_metaclass
-from transforms3d.euler import euler2quat
-from transforms3d.quaternions import qmult, quat2mat
import igibson
-from igibson.controllers import ControlType, create_controller
-from igibson.external.pybullet_tools.utils import (
- get_child_frame_pose,
- get_constraint_violation,
- get_joint_info,
- get_relative_pose,
- joints_from_names,
- set_coll_filter,
- set_joint_positions,
-)
from igibson.object_states.factory import prepare_object_states
-from igibson.utils.constants import SemanticClass
-from igibson.utils.python_utils import assert_valid_key, merge_nested_dicts
-from igibson.utils.utils import rotate_vector_3d
-# Global dicts that will contain mappings
-REGISTERED_ROBOTS = {}
-
-def register_robot(cls):
- if cls.__name__ not in REGISTERED_ROBOTS:
- REGISTERED_ROBOTS[cls.__name__] = cls
-
-
-class BaseRobot(with_metaclass(ABCMeta, object)):
+class BaseRobot(object):
"""
- Base class for mujoco xml/ROS urdf based robot agents.
-
- This class handles object loading, and provides method interfaces that should be
- implemented by subclassed robots.
+ Base class for mujoco xml/ROS urdf based agents.
+ Handles object loading
"""
- DEFAULT_RENDERING_PARAMS = {
- "use_pbr": True,
- "use_pbr_mapping": True,
- "shadow_caster": True,
- }
-
- def __init_subclass__(cls, **kwargs):
- """
- Registers all subclasses as part of this registry. This is useful to decouple internal codebase from external
- user additions. This way, users can add their custom robot by simply extending this Robot class,
- and it will automatically be registered internally. This allows users to then specify their robot
- directly in string-from in e.g., their config files, without having to manually set the str-to-class mapping
- in our code.
+ def __init__(self, model_file, base_name=None, scale=1, self_collision=False):
"""
- register_robot(cls)
-
- def __init__(
- self,
- control_freq=None,
- action_type="continuous",
- action_normalize=True,
- proprio_obs="default",
- controller_config=None,
- base_name=None,
- scale=1.0,
- self_collision=False,
- class_id=SemanticClass.ROBOTS,
- rendering_params=None,
- ):
+ :param model_file: model filename
+ :param base_name: name of the base link
+ :param scale: scale, default to 1
+ :param self_collision: whether to enable self collision
"""
- :param control_freq: float, control frequency (in Hz) at which to control the robot. If set to be None,
- simulator.import_robot will automatically set the control frequency to be 1 / render_timestep by default.
- :param action_type: str, one of {discrete, continuous} - what type of action space to use
- :param action_normalize: bool, whether to normalize inputted actions. This will override any default values
- specified by this class.
- :param proprio_obs: str or tuple of str, proprioception observation key(s) to use for generating proprioceptive
- observations. If str, should be exactly "default" -- this results in the default proprioception observations
- being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict for valid key choices
- :param controller_config: None or Dict[str, ...], nested dictionary mapping controller name(s) to specific controller
- configurations for this robot. This will override any default values specified by this class.
- :param base_name: None or str, robot link name that will represent the entire robot's frame of reference. If not None,
- this should correspond to one of the link names found in this robot's corresponding URDF / MJCF file.
- None defaults to the base link name used in @model_file
- :param scale: int, scaling factor for model (default is 1)
- :param self_collision: bool, whether to enable self collision
- :param class_id: SemanticClass, semantic class this robot belongs to. Default is SemanticClass.ROBOTS.
- :param rendering_params: None or Dict[str, Any], If not None, should be keyword-mapped rendering options to set.
- See DEFAULT_RENDERING_PARAMS for the values passed by default.
- """
- # Store arguments
+ self.parts = None
+ self.jdict = None
+ self.ordered_joints = None
+ self.robot_body = None
+ self.robot_name = None
self.base_name = base_name
- self.control_freq = control_freq
- self.scale = scale
- self.self_collision = self_collision
- assert_valid_key(key=action_type, valid_keys={"discrete", "continuous"}, name="action type")
- self.action_type = action_type
- self.action_normalize = action_normalize
- self.proprio_obs = self.default_proprio_obs if proprio_obs == "default" else list(proprio_obs)
- self.controller_config = {} if controller_config is None else controller_config
-
- # Initialize internal attributes that will be loaded later
- # These will have public interfaces
- self.simulator = None
- self.model_type = None
- self.action_list = None # Array of discrete actions to deploy
- self._links = None
- self._joints = None
- self._controllers = None
- self._mass = None
- self._joint_state = { # This is filled in periodically every time self.update_state() is called
- "unnormalized": {
- "position": None,
- "velocity": None,
- "torque": None,
- },
- "normalized": {
- "position": None,
- "velocity": None,
- "torque": None,
- },
- "at_limits": None,
- }
-
- # TODO: Replace this with a reasonable StatefulObject inheritance.
- prepare_object_states(self, abilities={"robot": {}})
-
- # This logic is repeated because Robot does not inherit from Object.
- # TODO: Remove this logic once the object refactoring is complete.
- self.class_id = class_id
- self._rendering_params = dict(self.DEFAULT_RENDERING_PARAMS)
- if rendering_params is not None:
- self._rendering_params.update(rendering_params)
-
- self._loaded = False
- self._body_ids = None # this is the unique pybullet body id(s) representing this model
- self.states = {}
-
- def load(self, simulator):
- """
- Load object into pybullet and return list of loaded body ids.
- :param simulator: Simulator, iGibson simulator reference
-
- :return Array[int]: List of unique pybullet IDs corresponding to this model. This will usually
- only be a single value
- """
- if self._loaded:
- raise ValueError("Cannot load a single model multiple times.")
- self._loaded = True
-
- # Store body ids and return them
- _body_ids = self._load(simulator)
-
- # A persistent reference to simulator is needed for AG in ManipulationRobot
- self.simulator = simulator
- return _body_ids
-
- def _load(self, simulator):
- """
- Loads this pybullet model into the simulation. Should return a list of unique body IDs corresponding
- to this model.
-
- :param simulator: Simulator, iGibson simulator reference
-
- :return Array[int]: List of unique pybullet IDs corresponding to this model. This will usually
- only be a single value
- """
+ self.robot_ids = None
+ self.robot_mass = None
+ self.model_file = model_file
+ self.physics_model_dir = os.path.join(igibson.assets_path, "models")
+ self.scale = scale
+ self.eyes = None
logging.info("Loading robot model file: {}".format(self.model_file))
-
- # Set flags for loading model
- flags = p.URDF_USE_MATERIAL_COLORS_FROM_MTL
- if self.self_collision:
- flags = flags | p.URDF_USE_SELF_COLLISION | p.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT
-
- # Run some sanity checks and load the model
- model_file_type = self.model_file.split(".")[-1]
- if model_file_type == "urdf":
+ if self.model_file[-4:] == "urdf":
self.model_type = "URDF"
- body_ids = (p.loadURDF(self.model_file, globalScaling=self.scale, flags=flags),)
else:
self.model_type = "MJCF"
- assert self.scale == 1.0, (
- "robot scale must be 1.0 because pybullet does not support scaling " "for MJCF model (p.loadMJCF)"
- )
- body_ids = p.loadMJCF(self.model_file, flags=flags)
-
- # Store body ids
- self._body_ids = body_ids
-
- # Load into simulator and initialize states
- for body_id in self._body_ids:
- simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
-
- for state in self.states.values():
- state.initialize(simulator)
-
- # Grab relevant references from the body IDs
- self._setup_references()
-
- # Disable collisions
- for names in self.disabled_collision_pairs:
- link_a, link_b = joints_from_names(self.get_body_id(), names)
- p.setCollisionFilterPair(self.get_body_id(), self.get_body_id(), link_a, link_b, 0)
-
- # Load controllers
- self._load_controllers()
+ assert self.scale == 1, "pybullet does not support scaling for MJCF model (p.loadMJCF)"
- # Setup action space
- self._action_space = (
- self._create_discrete_action_space()
- if self.action_type == "discrete"
- else self._create_continuous_action_space()
- )
-
- # Validate this robot configuration
- self._validate_configuration()
-
- # Return the body IDs
- return body_ids
-
- def _setup_references(self):
- """
- Parse the set of robot @body_ids to get properties including joint information and mass
- """
- assert len(self._body_ids) == 1, "Only one robot body ID was expected, but got {}!".format(len(self._body_ids))
- robot_id = self.get_body_id()
-
- # Initialize link and joint dictionaries for this robot
- self._links, self._joints, self._mass = OrderedDict(), OrderedDict(), 0.0
-
- # Grab model base info
- base_name = p.getBodyInfo(robot_id)[0].decode("utf8")
- self._links[base_name] = RobotLink(base_name, -1, robot_id)
- # if base_name is unspecified, use this link as robot_body (base_link).
- if self.base_name is None:
- self.base_name = base_name
+ self.states = {}
+ prepare_object_states(self, online=True)
- # Loop through all robot links and infer relevant link / joint / mass references
- for j in range(p.getNumJoints(robot_id)):
- self._mass += p.getDynamicsInfo(robot_id, j)[0]
- p.setJointMotorControl2(robot_id, j, p.POSITION_CONTROL, positionGain=0.1, velocityGain=0.1, force=0)
- _, joint_name, joint_type, _, _, _, _, _, _, _, _, _, link_name, _, _, _, _ = p.getJointInfo(robot_id, j)
- logging.debug("Robot joint: {}".format(p.getJointInfo(robot_id, j)))
- joint_name = joint_name.decode("utf8")
- link_name = link_name.decode("utf8")
- self._links[link_name] = RobotLink(link_name, j, robot_id)
+ # For BEHAVIOR compatibility -- may be removed eventually
+ self.category = "agent"
- # We additionally create joint references if they are (not) of certain types
- if joint_name[:6] == "ignore":
- # We don't save a reference to this joint, but we disable its motor
- RobotJoint(joint_name, j, robot_id).disable_motor()
- elif joint_name[:8] == "jointfix" or joint_type == p.JOINT_FIXED:
- # Fixed joint, so we don't save a reference to this joint
- pass
- else:
- # Default case, we store a reference
- self._joints[joint_name] = RobotJoint(joint_name, j, robot_id)
-
- # Populate the joint states
- self.update_state()
-
- # Update the configs
- for group in self.controller_order:
- group_controller_name = (
- self.controller_config[group]["name"]
- if group in self.controller_config and "name" in self.controller_config[group]
- else self._default_controllers[group]
- )
- self.controller_config[group] = merge_nested_dicts(
- base_dict=self._default_controller_config[group][group_controller_name],
- extra_dict=self.controller_config.get(group, {}),
- )
+ self.self_collision = self_collision
- def _validate_configuration(self):
- """
- Run any needed sanity checks to make sure this robot was created correctly.
+ def load(self):
"""
- pass
+ Load the robot model into pybullet
- def update_state(self):
+ :return: pybullet body id(s) of the loaded robot model
"""
- Updates the internal proprioceptive state of this robot, and returns the raw values
+ flags = p.URDF_USE_MATERIAL_COLORS_FROM_MTL
+ if self.self_collision:
+ flags = flags | p.URDF_USE_SELF_COLLISION | p.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT
- :return Tuple[Array[float], Array[float]]: The raw joint states, normalized joint states
- for this robot
- """
- # Grab raw values
- joint_states = np.array([j.get_state() for j in self._joints.values()]).astype(np.float32).flatten()
- joint_states_normalized = (
- np.array([j.get_relative_state() for j in self._joints.values()]).astype(np.float32).flatten()
- )
+ if self.model_type == "MJCF":
+ self.robot_ids = p.loadMJCF(os.path.join(self.physics_model_dir, self.model_file), flags=flags)
+ if self.model_type == "URDF":
+ self.robot_ids = (
+ p.loadURDF(
+ os.path.join(self.physics_model_dir, self.model_file), globalScaling=self.scale, flags=flags
+ ),
+ )
- # Get raw joint values and normalized versions
- self._joint_state["unnormalized"]["position"] = joint_states[0::3]
- self._joint_state["unnormalized"]["velocity"] = joint_states[1::3]
- self._joint_state["unnormalized"]["torque"] = joint_states[2::3]
- self._joint_state["normalized"]["position"] = joint_states_normalized[0::3]
- self._joint_state["normalized"]["velocity"] = joint_states_normalized[1::3]
- self._joint_state["normalized"]["torque"] = joint_states_normalized[2::3]
+ self.parts, self.jdict, self.ordered_joints, self.robot_body, self.robot_mass = self.parse_robot(self.robot_ids)
- # Infer whether joints are at their limits
- self._joint_state["at_limits"] = 1.0 * (np.abs(self.joint_positions_normalized) > 0.99)
+ assert (
+ "eyes" in self.parts
+ ), 'Please add a link named "eyes" in your robot URDF file with the same pose as the onboard camera. Feel free to check out assets/models/turtlebot/turtlebot.urdf for an example.'
+ self.eyes = self.parts["eyes"]
- # Return the raw joint states
- return joint_states, joint_states_normalized
+ return self.robot_ids
- def calc_state(self):
+ def parse_robot(self, bodies):
"""
- Calculate proprioceptive states for the robot. By default, this is:
- [pos, rpy, lin_vel, ang_vel, joint_states]
+ Parse the robot to get properties including joint information and mass
- :return Array[float]: Flat array of proprioceptive states (e.g.: [position, orientation, ...])
+ :param bodies: body ids in pybullet
+ :return: parts, joints, ordered_joints, robot_body, robot_mass
"""
- # Update states
- joint_states, _ = self.update_state()
- pos = self.get_position()
- rpy = self.get_rpy()
+ assert len(bodies) == 1, "robot body has length > 1"
- # rotate linear and angular velocities to local frame
- lin_vel = rotate_vector_3d(self.base_link.get_linear_velocity(), *rpy)
- ang_vel = rotate_vector_3d(self.base_link.get_angular_velocity(), *rpy)
-
- state = np.concatenate([pos, rpy, lin_vel, ang_vel, joint_states])
- return state
-
- def can_toggle(self, toggle_position, toggle_distance_threshold):
- """
- Returns True if the part of the robot that can toggle a toggleable is within the given range of a
- point corresponding to a toggle marker
- by default, we assume robot cannot toggle toggle markers
+ if self.parts is not None:
+ parts = self.parts
+ else:
+ parts = {}
- :param toggle_position: Array[float], (x,y,z) cartesian position values as a reference point for evaluating
- whether a toggle can occur
- :param toggle_distance_threshold: float, distance value below which a toggle is allowed
+ if self.jdict is not None:
+ joints = self.jdict
+ else:
+ joints = {}
- :return bool: True if the part of the robot that can toggle a toggleable is within the given range of a
- point corresponding to a toggle marker. By default, we assume robot cannot toggle toggle markers
- """
- return False
+ if self.ordered_joints is not None:
+ ordered_joints = self.ordered_joints
+ else:
+ ordered_joints = []
+
+ robot_mass = 0.0
+
+ base_name, robot_name = p.getBodyInfo(bodies[0])
+ base_name = base_name.decode("utf8")
+ robot_name = robot_name.decode("utf8")
+ parts[base_name] = BodyPart(base_name, bodies, 0, -1)
+ self.robot_name = robot_name
+ # if base_name is unspecified or equal to the base_name returned by p.getBodyInfo, use this link as robot_body (base_link).
+ if self.base_name is None or self.base_name == base_name:
+ self.robot_body = parts[base_name]
+ self.base_name = base_name
- def get_body_id(self):
- """
- Gets the body ID for this robot.
+ for j in range(p.getNumJoints(bodies[0])):
+ robot_mass += p.getDynamicsInfo(bodies[0], j)[0]
+ p.setJointMotorControl2(bodies[0], j, p.POSITION_CONTROL, positionGain=0.1, velocityGain=0.1, force=0)
+ _, joint_name, joint_type, _, _, _, _, _, _, _, _, _, part_name, _, _, _, _ = p.getJointInfo(bodies[0], j)
+ logging.debug("Robot joint: {}".format(p.getJointInfo(bodies[0], j)))
+ joint_name = joint_name.decode("utf8")
+ part_name = part_name.decode("utf8")
- If the object somehow has multiple bodies, this will be the default body that the default manipulation functions
- will manipulate.
+ parts[part_name] = BodyPart(part_name, bodies, 0, j)
- Should be implemented by all subclasses.
+ # otherwise, use the specified base_name link as robot_body (base_link).
+ if self.robot_body is None and self.base_name == part_name:
+ self.robot_body = parts[part_name]
- :return None or int: Body ID representing this model in simulation if it exists, else None
- """
- return None if self._body_ids is None else self._body_ids[0]
+ if joint_name[:6] == "ignore":
+ Joint(joint_name, bodies, 0, j).disable_motor()
+ continue
- def reset(self):
- """
- Reset function for each specific robot. Can be overwritten by subclass
+ if joint_name[:8] != "jointfix" and joint_type != p.JOINT_FIXED:
+ joints[joint_name] = Joint(joint_name, bodies, 0, j)
+ ordered_joints.append(joints[joint_name])
- By default, sets all joint states (pos, vel) to 0, and resets all controllers.
- """
- for joint in self._joints.values():
- joint.reset_state(0.0, 0.0)
+ if self.robot_body is None:
+ raise Exception("robot body not initialized.")
- for controller in self._controllers.values():
- controller.reset()
+ return parts, joints, ordered_joints, self.robot_body, robot_mass
- def _load_controllers(self):
- """
- Loads controller(s) to map inputted actions into executable (pos, vel, and / or torque) signals on this robot.
- Stores created controllers as dictionary mapping controller names to specific controller
- instances used by this robot.
+ def robot_specific_reset(self):
"""
- # Initialize controllers to create
- self._controllers = OrderedDict()
- # Loop over all controllers, in the order corresponding to @action dim
- for name in self.controller_order:
- assert_valid_key(key=name, valid_keys=self.controller_config, name="controller name")
- cfg = self.controller_config[name]
- # If we're using normalized action space, override the inputs for all controllers
- if self.action_normalize:
- cfg["command_input_limits"] = "default" # default is normalized (-1, 1)
- # Create the controller
- self._controllers[name] = create_controller(**cfg)
-
- @abstractmethod
- def _create_discrete_action_space(self):
- """
- Create a discrete action space for this robot. Should be implemented by the subclass (if a subclass does not
- support this type of action space, it should raise an error).
-
- :return gym.space: Robot-specific discrete action space
+ Reset function for each specific robot. Overridden by subclasses.
"""
raise NotImplementedError
- def _create_continuous_action_space(self):
- """
- Create a continuous action space for this robot. By default, this loops over all controllers and
- appends their respective input command limits to set the action space.
- Any custom behavior should be implemented by the subclass (e.g.: if a subclass does not
- support this type of action space, it should raise an error).
-
- :return gym.space.Box: Robot-specific continuous action space
- """
- # Action space is ordered according to the order in _default_controller_config control
- low, high = [], []
- for controller in self._controllers.values():
- limits = controller.command_input_limits
- low.append(np.array([-np.inf]) if limits is None else limits[0])
- high.append(np.array([np.inf]) if limits is None else limits[1])
-
- return gym.spaces.Box(
- shape=(self.action_dim,), low=np.concatenate(low), high=np.concatenate(high), dtype=np.float32
- )
-
- def apply_action(self, action):
- """
- Converts inputted actions into low-level control signals and deploys them on the robot
-
- :param action: Array[float], n-DOF length array of actions to convert and deploy on the robot
- """
- # Update state
- self.update_state()
-
- # If we're using discrete action space, we grab the specific action and use that to convert to control
- if self.action_type == "discrete":
- action = np.array(self.action_list[action])
-
- # Run convert actions to controls
- control, control_type = self._actions_to_control(action=action)
-
- # Deploy control signals
- self._deploy_control(control=control, control_type=control_type)
-
- def _actions_to_control(self, action):
- """
- Converts inputted @action into low level control signals to deploy directly on the robot.
- This returns two arrays: the converted low level control signals and an array corresponding
- to the specific ControlType for each signal.
-
- :param action: Array[float], n-DOF length array of actions to convert and deploy on the robot
- :return Tuple[Array[float], Array[ControlType]]: The (1) raw control signals to send to the robot's joints
- and (2) control types for each joint
- """
- # First, loop over all controllers, and calculate the computed control
- control = OrderedDict()
- idx = 0
- for name, controller in self._controllers.items():
- # Compose control_dict
- control_dict = self.get_control_dict()
- # Set command, then take a controller step
- controller.update_command(command=action[idx : idx + controller.command_dim])
- control[name] = {
- "value": controller.step(control_dict=control_dict),
- "type": controller.control_type,
- }
- # Update idx
- idx += controller.command_dim
-
- # Compose controls
- u_vec = np.zeros(self.n_joints)
- u_type_vec = np.array([ControlType.POSITION] * self.n_joints)
- for group, ctrl in control.items():
- idx = self._controllers[group].joint_idx
- u_vec[idx] = ctrl["value"]
- u_type_vec[idx] = ctrl["type"]
-
- # Return control
- return u_vec, u_type_vec
-
- def _deploy_control(self, control, control_type):
- """
- Deploys control signals @control with corresponding @control_type on this robot
-
- :param control: Array[float], raw control signals to send to the robot's joints
- :param control_type: Array[ControlType], control types for each joint
- """
- # Run sanity check
- joints = self._joints.values()
- assert len(control) == len(control_type) == len(joints), (
- "Control signals, control types, and number of joints should all be the same!"
- "Got {}, {}, and {} respectively.".format(len(control), len(control_type), len(joints))
- )
-
- # Loop through all control / types, and deploy the signal
- for joint, ctrl, ctrl_type in zip(joints, control, control_type):
- if ctrl_type == ControlType.TORQUE:
- joint.set_torque(ctrl)
- elif ctrl_type == ControlType.VELOCITY:
- joint.set_vel(ctrl)
- elif ctrl_type == ControlType.POSITION:
- joint.set_pos(ctrl)
- else:
- raise ValueError("Invalid control type specified: {}".format(ctrl_type))
-
- def get_proprioception(self):
- """
- :return Array[float]: numpy array of all robot-specific proprioceptive observations.
- """
- proprio_dict = self._get_proprioception_dict()
- return np.concatenate([proprio_dict[obs] for obs in self.proprio_obs])
-
- def get_position(self):
- """
- :return Array[float]: (x,y,z) global cartesian coordinates of this model's body (as taken at its body_id)
- """
- return self.get_position_orientation()[0]
-
- def get_orientation(self):
- """
- :return Array[float]: (x,y,z,w) global orientation in quaternion form of this model's body
- (as taken at its body_id)
- """
- return self.get_position_orientation()[1]
-
- def get_position_orientation(self):
- """
- :return Tuple[Array[float], Array[float]]: pos (x,y,z) global cartesian coordinates, quat (x,y,z,w) global
- orientation in quaternion form of this model's body (as taken at its body_id)
- """
- pos, orn = p.getBasePositionAndOrientation(self.get_body_id())
- return np.array(pos), np.array(orn)
-
- def get_rpy(self):
- """
- Return robot orientation in roll, pitch, yaw
- :return: roll, pitch, yaw
- """
- return self.base_link.get_rpy()
-
- def get_velocity(self):
- """
- Get velocity of this robot (velocity associated with base link)
-
- :return Tuple[Array[float], Array[float]]: linear (x,y,z) velocity, angular (ax,ay,az)
- velocity of this robot
- """
- return self.base_link.get_velocity()
-
- def get_linear_velocity(self):
- """
- Get linear velocity of this robot (velocity associated with base link)
-
- :return Array[float]: linear (x,y,z) velocity of this robot
- """
- return self.base_link.get_linear_velocity()
-
- def get_angular_velocity(self):
- """
- Get angular velocity of this robot (velocity associated with base link)
-
- :return Array[float]: angular (ax,ay,az) velocity of this robot
- """
- return self.base_link.get_angular_velocity()
-
- def set_position(self, pos):
- """
- Sets the model's global position
-
- :param pos: Array[float], corresponding to (x,y,z) global cartesian coordinates to set
+ def calc_state(self):
"""
- old_quat = self.get_orientation()
- self.set_position_orientation(pos, old_quat)
-
- def set_orientation(self, quat):
+ Calculate proprioceptive states for each specific robot.
+ Overridden by subclasses.
"""
- Set the model's global orientation
-
- :param quat: Array[float], corresponding to (x,y,z,w) global quaternion orientation to set
- """
- old_pos = self.get_position()
- self.set_position_orientation(old_pos, quat)
+ raise NotImplementedError
- def set_position_orientation(self, pos, quat):
+ def is_grasping(self, candidate_obj):
"""
- Set model's global position and orientation
-
- :param pos: Array[float], corresponding to (x,y,z) global cartesian coordinates to set
- :param quat: Array[float], corresponding to (x,y,z,w) global quaternion orientation to set
+ Returns True if the robot is grasping the target object.
"""
- p.resetBasePositionAndOrientation(self.get_body_id(), pos, quat)
+ raise NotImplementedError
- def get_control_dict(self):
+ def can_toggle(self, toggle_position, toggle_distance_threshold):
"""
- Grabs all relevant information that should be passed to each controller during each controller step.
-
- :return Dict[str, Array[float]]: Keyword-mapped control values for this robot.
- By default, returns the following:
-
- - joint_position: (n_dof,) joint positions
- - joint_velocity: (n_dof,) joint velocities
- - joint_torque: (n_dof,) joint torques
- - base_pos: (3,) (x,y,z) global cartesian position of the robot's base link
- - base_quat: (4,) (x,y,z,w) global cartesian orientation of ths robot's base link
+ Returns True if the part of the robot that can toggle a toggleable is within the given range of a point corresponding to a toggle marker.
+ By default, we assume the robot cannot toggle toggle markers.
"""
- return {
- "joint_position": self.joint_positions,
- "joint_velocity": self.joint_velocities,
- "joint_torque": self.joint_torques,
- "base_pos": self.get_position(),
- "base_quat": self.get_orientation(),
- }
+ return False
def dump_state(self):
- """
- Dump the state of this model other than what's not included in native pybullet state. This defaults to a no-op.
- """
pass
def load_state(self, dump):
- """
- Load the state of this model other than what's not included in native pybullet state. This defaults to a no-op.
- """
pass
- def _get_proprioception_dict(self):
- """
- :return dict: keyword-mapped proprioception observations available for this robot. Can be extended by subclasses
- """
- return {
- "joint_qpos": self.joint_positions,
- "joint_qpos_sin": np.sin(self.joint_positions),
- "joint_qpos_cos": np.cos(self.joint_positions),
- "joint_qvel": self.joint_velocities,
- "joint_qtor": self.joint_torques,
- "robot_pos": self.get_position(),
- "robot_rpy": self.get_rpy(),
- "robot_quat": self.get_orientation(),
- "robot_lin_vel": self.get_linear_velocity(),
- "robot_ang_vel": self.get_angular_velocity(),
- }
-
- @property
- def proprioception_dim(self):
- """
- :return int: Size of self.get_proprioception() vector
- """
- return len(self.get_proprioception())
-
- @property
- def links(self):
- """
- Links belonging to this robot.
-
- :return OrderedDict[str, RobotLink]: Ordered Dictionary mapping robot link names to corresponding
- RobotLink objects owned by this robot
- """
- return self._links
-
- @property
- def joints(self):
- """
- Joints belonging to this robot.
-
- :return OrderedDict[str, RobotJoint]: Ordered Dictionary mapping robot joint names to corresponding
- RobotJoint objects owned by this robot
- """
- return self._joints
-
- @property
- def n_links(self):
- """
- :return int: Number of links for this robot
- """
- return len(list(self._links.keys()))
-
- @property
- def n_joints(self):
- """
- :return int: Number of joints for this robot
- """
- return len(list(self._joints.keys()))
-
- @property
- def base_link(self):
- """
- Returns the RobotLink body corresponding to the link as defined by self.base_name.
-
- Note that if base_name was not specified during this robot's initialization, this will default to be the
- first link in the underlying robot model file.
-
- :return RobotLink: robot's base link corresponding to self.base_name.
- """
- assert self.base_name in self._links, "Cannot find base link '{}' in links! Valid options are: {}".format(
- self.base_name, list(self._links.keys())
- )
- return self._links[self.base_name]
-
- @property
- def eyes(self):
- """
- Returns the RobotLink corresponding to the robot's camera. Assumes that there is a link
- with name "eyes" in the underlying robot model. If not, an error will be raised.
-
- :return RobotLink: link containing the robot's camera
- """
- assert "eyes" in self._links, "Cannot find 'eyes' in links, current link names are: {}".format(
- list(self._links.keys())
- )
- return self._links["eyes"]
-
- @property
- def mass(self):
- """
- Returns the mass of this robot. Default is 0.0 kg
-
- :return float: Mass of this robot, in kg
- """
- return self._mass
-
- @property
- def joint_position_limits(self):
- """
- :return Tuple[Array[float], Array[float]]: (min, max) joint position limits, where each is an n-DOF length array
- """
- return (self.joint_lower_limits, self.joint_upper_limits)
-
- @property
- def joint_velocity_limits(self):
- """
- :return Tuple[Array[float], Array[float]]: (min, max) joint velocity limits, where each is an n-DOF length array
- """
- return (
- -np.array([j.max_velocity for j in self._joints.values()]),
- np.array([j.max_velocity for j in self._joints.values()]),
- )
-
- @property
- def joint_torque_limits(self):
- """
- :return Tuple[Array[float], Array[float]]: (min, max) joint torque limits, where each is an n-DOF length array
- """
- return (
- -np.array([j.max_torque for j in self._joints.values()]),
- np.array([j.max_torque for j in self._joints.values()]),
- )
-
- @property
- def joint_positions(self):
- """
- :return Array[float]: n-DOF length array of this robot's joint positions
- """
- return deepcopy(self._joint_state["unnormalized"]["position"])
-
- @property
- def joint_velocities(self):
- """
- :return Array[float]: n-DOF length array of this robot's joint velocities
- """
- return deepcopy(self._joint_state["unnormalized"]["velocity"])
-
- @property
- def joint_torques(self):
- """
- :return Array[float]: n-DOF length array of this robot's joint torques
- """
- return deepcopy(self._joint_state["unnormalized"]["torque"])
-
- @property
- def joint_positions_normalized(self):
- """
- :return Array[float]: n-DOF length array of this robot's normalized joint positions in range [-1, 1]
- """
- return deepcopy(self._joint_state["normalized"]["position"])
-
- @property
- def joint_velocities_normalized(self):
- """
- :return Array[float]: n-DOF length array of this robot's normalized joint velocities in range [-1, 1]
- """
- return deepcopy(self._joint_state["normalized"]["velocity"])
-
- @property
- def joint_torques_normalized(self):
- """
- :return Array[float]: n-DOF length array of this robot's normalized joint torques in range [-1, 1]
- """
- return deepcopy(self._joint_state["normalized"]["torque"])
-
- @property
- def joint_at_limits(self):
- """
- :return Array[float]: n-DOF length array specifying whether joint is at its limit,
- with 1.0 --> at limit, otherwise 0.0
- """
- return deepcopy(self._joint_state["at_limits"])
-
- @property
- def joint_has_limits(self):
- """
- :return Array[bool]: n-DOF length array specifying whether joint has a limit or not
- """
- return np.array([j.has_limit for j in self._joints.values()])
-
- @property
- def category(self):
- """
- :return str: Semantic category for robots
- """
- return "agent"
-
- @property
- def action_dim(self):
- """
- :return int: Dimension of action space for this robot. By default,
- is the sum over all controller action dimensions
- """
- return sum([controller.command_dim for controller in self._controllers.values()])
-
- @property
- def action_space(self):
- """
- Action space for this robot.
-
- :return gym.space: Action space, either discrete (Discrete) or continuous (Box)
- """
- return deepcopy(self._action_space)
-
- @property
- @abstractmethod
- def controller_order(self):
- """
- :return Tuple[str]: Ordering of the actions, corresponding to the controllers. e.g., ["base", "arm", "gripper"],
- to denote that the action vector should be interpreted as first the base action, then arm command, then
- gripper command
- """
- raise NotImplementedError
-
- @property
- def controller_action_idx(self):
- """
- :return: Dict[str, Array[int]]: Mapping from controller names (e.g.: head, base, arm, etc.) to corresponding
- indices in the action vector
- """
- dic = {}
- idx = 0
- for controller in self.controller_order:
- cmd_dim = self._controllers[controller].command_dim
- dic[controller] = np.arange(idx, idx + cmd_dim)
- idx += cmd_dim
-
- return dic
-
- @property
- def control_limits(self):
- """
- :return: Dict[str, Any]: Keyword-mapped limits for this robot. Dict contains:
- position: (min, max) joint limits, where min and max are N-DOF arrays
- velocity: (min, max) joint velocity limits, where min and max are N-DOF arrays
- torque: (min, max) joint torque limits, where min and max are N-DOF arrays
- has_limit: (n_dof,) array where each element is True if that corresponding joint has a position limit
- (otherwise, joint is assumed to be limitless)
- """
- return {
- "position": (self.joint_lower_limits, self.joint_upper_limits),
- "velocity": (-self.max_joint_velocities, self.max_joint_velocities),
- "torque": (-self.max_joint_torques, self.max_joint_torques),
- "has_limit": self.joint_has_limits,
- }
-
- @property
- def default_proprio_obs(self):
- """
- :return Array[str]: Default proprioception observations to use
- """
- return []
-
- @property
- @abstractmethod
- def default_joint_pos(self):
- """
- :return Array[float]: Default joint positions for this robot
- """
- raise NotImplementedError
-
- @property
- @abstractmethod
- def _default_controller_config(self):
- """
- :return Dict[str, Any]: default nested dictionary mapping controller name(s) to specific controller
- configurations for this robot. Note that the order specifies the sequence of actions to be received
- from the environment.
-
- Expected structure is as follows:
- group1:
- controller_name1:
- controller_name1_params
- ...
- controller_name2:
- ...
- group2:
- ...
-
- The @group keys specify the control type for various aspects of the robot, e.g.: "head", "arm", "base", etc.
- @controller_name keys specify the supported controllers for that group. A default specification MUST be
- specified for each controller_name. e.g.: IKController, DifferentialDriveController, JointController, etc.
- """
- return {}
-
- @property
- @abstractmethod
- def _default_controllers(self):
- """
- :return Dict[str, str]: Maps robot group (e.g. base, arm, etc.) to default controller class name to use
- (e.g. IKController, JointController, etc.)
- """
- return {}
-
- @property
- def joint_ids(self):
- """
- :return: Array[int], joint IDs for this robot
- """
- return np.array([joint.joint_id for joint in self._joints.values()])
-
- @property
- def joint_damping(self):
- """
- :return: Array[float], joint damping values for this robot
- """
- return np.array([get_joint_info(self.get_body_id(), joint_id)[6] for joint_id in self.joint_ids])
-
- @property
- def joint_lower_limits(self):
- """
- :return: Array[float], minimum values for this robot's joints. If joint does not have a range, returns -1000
- for that joint
- """
- return np.array([joint.lower_limit if joint.has_limit else -1000.0 for joint in self._joints.values()])
-
- @property
- def joint_upper_limits(self):
- """
- :return: Array[float], maximum values for this robot's joints. If joint does not have a range, returns 1000
- for that joint
- """
- return np.array([joint.upper_limit if joint.has_limit else 1000.0 for joint in self._joints.values()])
-
- @property
- def joint_range(self):
- """
- :return: Array[float], joint range values for this robot's joints
- """
- return self.joint_upper_limits - self.joint_lower_limits
-
- @property
- def max_joint_velocities(self):
- """
- :return: Array[float], maximum velocities for this robot's joints
- """
- return np.array([joint.max_velocity for joint in self._joints.values()])
-
- @property
- def max_joint_torques(self):
- """
- :return: Array[float], maximum torques for this robot's joints
- """
- return np.array([joint.max_torque for joint in self._joints.values()])
-
- @property
- def disabled_collision_pairs(self):
- """
- :return Tuple[Tuple[str, str]]: List of collision pairs to disable. Default is None (empty list)
- """
- return []
-
- @property
- @abstractmethod
- def model_file(self):
- """
- :return str: absolute path to robot model's URDF / MJCF file
- """
- raise NotImplementedError
-
- def force_wakeup(self):
- """
- Wakes up all links of this robot
- """
- for link_name in self._links:
- self._links[link_name].force_wakeup()
+ def get_body_id(self):
+ body_id = None
+ if self.robot_ids:
+ body_id = self.robot_ids[0]
+ return body_id
-class RobotLink:
+class BodyPart:
"""
Body part (link) of Robots
"""
- def __init__(self, link_name, link_id, body_id):
- """
- :param link_name: str, name of the link corresponding to @link_id
- :param link_id: int, ID of this link within the link(s) found in the body corresponding to @body_id
- :param body_id: Robot body ID containing this link
- """
- # Store args and initialize state
- self.link_name = link_name
- self.link_id = link_id
- self.body_id = body_id
- self.initial_pos, self.initial_quat = self.get_position_orientation()
+ def __init__(self, body_name, bodies, body_index, body_part_index):
+ self.bodies = bodies
+ self.body_name = body_name
+ self.body_index = body_index
+ self.body_part_index = body_part_index
+ self.initialPosition = self.get_position()
+ self.initialOrientation = self.get_orientation()
self.movement_cid = -1
def get_name(self):
- """
- Get name of this link
- """
- return self.link_name
-
- def get_position_orientation(self):
- """
- Get pose of this link
+ """Get name of body part"""
+ return self.body_name
- :return Tuple[Array[float], Array[float]]: pos (x,y,z) cartesian coordinates, quat (x,y,z,w)
- orientation in quaternion form of this link
- """
- if self.link_id == -1:
- pos, quat = p.getBasePositionAndOrientation(self.body_id)
+ def _state_fields_of_pose_of(self, body_id, link_id=-1):
+ """Get pose of body part"""
+ if link_id == -1:
+ (x, y, z), (a, b, c, d) = p.getBasePositionAndOrientation(body_id)
else:
- _, _, _, _, pos, quat = p.getLinkState(self.body_id, self.link_id)
- return np.array(pos), np.array(quat)
+ _, _, _, _, (x, y, z), (a, b, c, d) = p.getLinkState(body_id, link_id)
+ return np.array([x, y, z, a, b, c, d])
+
+ def _set_fields_of_pose_of(self, pos, orn):
+ """Set pose of body part"""
+ p.resetBasePositionAndOrientation(self.bodies[self.body_index], pos, orn)
+
+ def get_pose(self):
+ """Get pose of body part"""
+ return self._state_fields_of_pose_of(self.bodies[self.body_index], self.body_part_index)
def get_position(self):
- """
- :return Array[float]: (x,y,z) cartesian coordinates of this link
- """
- return self.get_position_orientation()[0]
+ """Get position of body part"""
+ return self.get_pose()[:3]
def get_orientation(self):
- """
- :return Array[float]: (x,y,z,w) orientation in quaternion form of this link
- """
- return self.get_position_orientation()[1]
+ """Get orientation of body part
+ Orientation is by default defined in [x,y,z,w]"""
+ return self.get_pose()[3:]
def get_rpy(self):
- """
- :return Array[float]: (r,p,y) orientation in euler form of this link
- """
- return np.array(p.getEulerFromQuaternion(self.get_orientation()))
+ """Get roll, pitch and yaw of body part
+ [roll, pitch, yaw]"""
+ return p.getEulerFromQuaternion(self.get_orientation())
- def set_position(self, pos):
- """
- Sets the link's position
+ def set_position(self, position):
+ """Get position of body part"""
+ self._set_fields_of_pose_of(position, self.get_orientation())
- :param pos: Array[float], corresponding to (x,y,z) cartesian coordinates to set
- """
- old_quat = self.get_orientation()
- self.set_position_orientation(pos, old_quat)
+ def set_orientation(self, orientation):
+ """Get position of body part
+ Orientation is defined in [x,y,z,w]"""
+ self._set_fields_of_pose_of(self.current_position(), orientation)
- def set_orientation(self, quat):
- """
- Set the link's global orientation
+ def set_pose(self, position, orientation):
+ """Set pose of body part"""
+ self._set_fields_of_pose_of(position, orientation)
- :param quat: Array[float], corresponding to (x,y,z,w) quaternion orientation to set
- """
- old_pos = self.get_position()
- self.set_position_orientation(old_pos, quat)
+ def current_position(self):
+ """Synonym method for get_position"""
+ return self.get_position()
- def set_position_orientation(self, pos, quat):
- """
- Set model's global position and orientation. Note: only supported if this is the base link (ID = -1!)
+ def current_orientation(self):
+ """Synonym method for get_orientation"""
+ return self.get_orientation()
- :param pos: Array[float], corresponding to (x,y,z) global cartesian coordinates to set
- :param quat: Array[float], corresponding to (x,y,z,w) global quaternion orientation to set
- """
- assert self.link_id == -1, "Can only set pose for a base link (id = -1)! Got link id: {}.".format(self.link_id)
- p.resetBasePositionAndOrientation(self.body_id, pos, quat)
+ def reset_position(self, position): # Backward compatibility
+ """Synonym method for set_position"""
+ self.set_position(position)
- def get_velocity(self):
- """
- Get velocity of this link
+ def reset_orientation(self, orientation): # Backward compatibility
+ """Synonym method for set_orientation"""
+ self.set_orientation(orientation)
- :return Tuple[Array[float], Array[float]]: linear (x,y,z) velocity, angular (ax,ay,az)
- velocity of this link
- """
- if self.link_id == -1:
- lin, ang = p.getBaseVelocity(self.body_id)
- else:
- _, _, _, _, _, _, lin, ang = p.getLinkState(self.body_id, self.link_id, computeLinkVelocity=1)
- return np.array(lin), np.array(ang)
+ def reset_pose(self, position, orientation): # Backward compatibility
+ """Synonym method for set_pose"""
+ self.set_pose(position, orientation)
def get_linear_velocity(self):
"""
- Get linear velocity of this link
-
- :return Array[float]: linear (x,y,z) velocity of this link
+ Get linear velocity of the body part
"""
- return self.get_velocity()[0]
+ if self.body_part_index == -1:
+ (vx, vy, vz), _ = p.getBaseVelocity(self.bodies[self.body_index])
+ else:
+ _, _, _, _, _, _, (vx, vy, vz), _ = p.getLinkState(
+ self.bodies[self.body_index], self.body_part_index, computeLinkVelocity=1
+ )
+ return np.array([vx, vy, vz])
def get_angular_velocity(self):
"""
- Get angular velocity of this link
-
- :return Array[float]: angular (ax,ay,az) velocity of this link
+ Get angular velocity of the body part
"""
- return self.get_velocity()[1]
+ if self.body_part_index == -1:
+ _, (vr, vp, vyaw) = p.getBaseVelocity(self.bodies[self.body_index])
+ else:
+ _, _, _, _, _, _, _, (vr, vp, vyaw) = p.getLinkState(
+ self.bodies[self.body_index], self.body_part_index, computeLinkVelocity=1
+ )
+ return np.array([vr, vp, vyaw])
def contact_list(self):
"""
Get contact points of the body part
-
- :return Array[ContactPoints]: list of contact points seen by this link
- """
- return p.getContactPoints(self.body_id, -1, self.link_id, -1)
-
- def force_wakeup(self):
"""
- Forces a wakeup for this robot. Defaults to no-op.
- """
- pass
+ return p.getContactPoints(self.bodies[self.body_index], -1, self.body_part_index, -1)
-class RobotJoint:
+class Joint:
"""
- Joint of a robot
+ Joint of Robots
"""
- def __init__(self, joint_name, joint_id, body_id):
- """
- :param joint_name: str, name of the joint corresponding to @joint_id
- :param joint_id: int, ID of this joint within the joint(s) found in the body corresponding to @body_id
- :param body_id: Robot body ID containing this link
- """
- # Store args and initialize state
+ def __init__(self, joint_name, bodies, body_index, joint_index):
+ self.bodies = bodies
+ self.body_index = body_index
+ self.joint_index = joint_index
self.joint_name = joint_name
- self.joint_id = joint_id
- self.body_id = body_id
# read joint type and joint limit from the URDF file
# lower_limit, upper_limit, max_velocity, max_torque =
@@ -1150,7 +325,8 @@ def __init__(self, joint_name, joint_id, body_id):
_,
_,
_,
- ) = p.getJointInfo(self.body_id, self.joint_id)
+ ) = p.getJointInfo(self.bodies[self.body_index], self.joint_index)
+ self.joint_has_limit = self.lower_limit < self.upper_limit
# if joint torque and velocity limits cannot be found in the model file, set a default value for them
if self.max_torque == 0.0:
@@ -1158,30 +334,25 @@ def __init__(self, joint_name, joint_id, body_id):
if self.max_velocity == 0.0:
# if max_velocity and joint limit are missing for a revolute joint,
# it's likely to be a wheel joint and a high max_velocity is usually supported.
- self.max_velocity = 15.0 if self.joint_type == p.JOINT_REVOLUTE and not self.has_limit else 1.0
+ if self.joint_type == p.JOINT_REVOLUTE and not self.joint_has_limit:
+ self.max_velocity = 15.0
+ else:
+ self.max_velocity = 1.0
def __str__(self):
- return "idx: {}, name: {}".format(self.joint_id, self.joint_name)
+ return "idx: {}, name: {}".format(self.joint_index, self.joint_name)
def get_state(self):
- """
- Get the current state of the joint
-
- :return Tuple[float, float, float]: (joint_pos, joint_vel, joint_tor) observed for this joint
- """
- x, vx, _, trq = p.getJointState(self.body_id, self.joint_id)
+ """Get state of joint"""
+ x, vx, _, trq = p.getJointState(self.bodies[self.body_index], self.joint_index)
return x, vx, trq
def get_relative_state(self):
- """
- Get the normalized current state of the joint
-
- :return Tuple[float, float, float]: Normalized (joint_pos, joint_vel, joint_tor) observed for this joint
- """
+ """Get normalized state of joint"""
pos, vel, trq = self.get_state()
# normalize position to [-1, 1]
- if self.has_limit:
+ if self.joint_has_limit:
mean = (self.lower_limit + self.upper_limit) / 2.0
magnitude = (self.upper_limit - self.lower_limit) / 2.0
pos = (pos - mean) / magnitude
@@ -1194,56 +365,45 @@ def get_relative_state(self):
return pos, vel, trq
- def set_pos(self, pos):
- """
- Set position of joint (in metric space)
-
- :param pos: float, desired position for this joint, in metric space
- """
- if self.has_limit:
- pos = np.clip(pos, self.lower_limit, self.upper_limit)
- p.setJointMotorControl2(self.body_id, self.joint_id, p.POSITION_CONTROL, targetPosition=pos)
-
- def set_vel(self, vel):
- """
- Set velocity of joint (in metric space)
+ def set_position(self, position):
+ """Set position of joint"""
+ if self.joint_has_limit:
+ position = np.clip(position, self.lower_limit, self.upper_limit)
+ p.setJointMotorControl2(
+ self.bodies[self.body_index], self.joint_index, p.POSITION_CONTROL, targetPosition=position
+ )
- :param vel: float, desired velocity for this joint, in metric space
- """
- vel = np.clip(vel, -self.max_velocity, self.max_velocity)
- p.setJointMotorControl2(self.body_id, self.joint_id, p.VELOCITY_CONTROL, targetVelocity=vel)
+ def set_velocity(self, velocity):
+ """Set velocity of joint"""
+ velocity = np.clip(velocity, -self.max_velocity, self.max_velocity)
+ p.setJointMotorControl2(
+ self.bodies[self.body_index], self.joint_index, p.VELOCITY_CONTROL, targetVelocity=velocity
+ )
def set_torque(self, torque):
- """
- Set torque of joint (in metric space)
-
- :param torque: float, desired torque for this joint, in metric space
- """
+ """Set torque of joint"""
torque = np.clip(torque, -self.max_torque, self.max_torque)
p.setJointMotorControl2(
- bodyIndex=self.body_id,
- jointIndex=self.joint_id,
+ bodyIndex=self.bodies[self.body_index],
+ jointIndex=self.joint_index,
controlMode=p.TORQUE_CONTROL,
force=torque,
)
def reset_state(self, pos, vel):
"""
- Reset pos and vel of joint in metric space
-
- :param pos: float, desired position for this joint, in metric space
- :param vel: float, desired velocity for this joint, in metric space
+ Reset pos and vel of joint
"""
- p.resetJointState(self.body_id, self.joint_id, targetValue=pos, targetVelocity=vel)
+ p.resetJointState(self.bodies[self.body_index], self.joint_index, targetValue=pos, targetVelocity=vel)
self.disable_motor()
def disable_motor(self):
"""
- Disable the motor of this joint
+ Disable the motor of the joint
"""
p.setJointMotorControl2(
- self.body_id,
- self.joint_id,
+ self.bodies[self.body_index],
+ self.joint_index,
controlMode=p.POSITION_CONTROL,
targetPosition=0,
targetVelocity=0,
@@ -1252,9 +412,38 @@ def disable_motor(self):
force=0,
)
- @property
- def has_limit(self):
- """
- :return bool: True if this joint has a limit, else False
- """
- return self.lower_limit < self.upper_limit
+ def get_joint_relative_state(self): # Synonym method
+ """Synonym method for get_relative_state"""
+ return self.get_relative_state()
+
+ def set_motor_position(self, pos): # Synonym method
+ """Synonym method for set_position"""
+ return self.set_position(pos)
+
+ def set_motor_torque(self, torque): # Synonym method
+ """Synonym method for set_torque"""
+ return self.set_torque(torque)
+
+ def set_motor_velocity(self, vel): # Synonym method
+ """Synonym method for set_velocity"""
+ return self.set_velocity(vel)
+
+ def reset_joint_state(self, position, velocity): # Synonym method
+ """Synonym method for reset_state"""
+ return self.reset_state(position, velocity)
+
+ def current_position(self): # Backward compatibility
+ """Synonym method for get_state"""
+ return self.get_state()
+
+ def current_relative_position(self): # Backward compatibility
+ """Synonym method for get_relative_state"""
+ return self.get_relative_state()
+
+ def reset_current_position(self, position, velocity): # Backward compatibility
+ """Synonym method for reset_state"""
+ self.reset_state(position, velocity)
+
+ def reset_position(self, position, velocity): # Backward compatibility
+ """Synonym method for reset_state"""
+ self.reset_state(position, velocity)
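To make the restored low-level API concrete, a minimal usage sketch (not part of the patch) that drives the Joint wrapper directly against a raw pybullet body follows. The r2d2.urdf model, the joint name, and joint index 0 are placeholder assumptions; only methods defined in this file are called.

    import pybullet as p
    import pybullet_data

    from igibson.robots.robot_base import Joint  # class restored by this patch

    p.connect(p.DIRECT)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    body_id = p.loadURDF("r2d2.urdf")  # placeholder model shipped with pybullet_data

    # The wrapper expects a tuple of body ids plus an index into that tuple.
    wheel = Joint("left_front_wheel_joint", (body_id,), 0, 0)  # joint index 0 is illustrative
    wheel.set_motor_velocity(1.0)      # clipped to wheel.max_velocity internally
    pos, vel, trq = wheel.get_state()  # raw joint position, velocity, torque
    wheel.reset_joint_state(0.0, 0.0)  # synonym for reset_state; also disables the motor
    p.disconnect()
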
diff --git a/igibson/robots/robot_locomotor.py b/igibson/robots/robot_locomotor.py
new file mode 100644
index 000000000..bc7cfa468
--- /dev/null
+++ b/igibson/robots/robot_locomotor.py
@@ -0,0 +1,299 @@
+import numpy as np
+from transforms3d.euler import euler2quat
+from transforms3d.quaternions import qmult, quat2mat
+
+from igibson.robots.robot_base import BaseRobot
+from igibson.utils.utils import rotate_vector_3d
+
+
+class LocomotorRobot(BaseRobot):
+ """
+ Built on top of BaseRobot.
+ Provides a common interface for a wide variety of robots.
+ """
+
+ def __init__(
+ self,
+ filename, # robot file name
+ action_dim, # action dimension
+ base_name=None,
+ scale=1.0,
+ control="torque",
+ is_discrete=True,
+ torque_coef=1.0,
+ velocity_coef=1.0,
+ self_collision=False,
+ ):
+ BaseRobot.__init__(self, filename, base_name, scale, self_collision)
+ self.control = control
+ self.is_discrete = is_discrete
+
+ assert type(action_dim) == int, "Action dimension must be int, got {}".format(type(action_dim))
+ self.action_dim = action_dim
+
+ if self.is_discrete:
+ self.set_up_discrete_action_space()
+ else:
+ self.set_up_continuous_action_space()
+
+ self.torque_coef = torque_coef
+ self.velocity_coef = velocity_coef
+ self.scale = scale
+
+ def set_up_continuous_action_space(self):
+ """
+ Each subclass implements its own continuous action space
+ """
+ raise NotImplementedError
+
+ def set_up_discrete_action_space(self):
+ """
+ Each subclass implements its own discrete action space
+ """
+ raise NotImplementedError
+
+ def robot_specific_reset(self):
+ """
+ Robot specific reset. Reset all joint positions and velocities to zero.
+ """
+ for j in self.ordered_joints:
+ j.reset_joint_state(0.0, 0.0)
+
+ def get_position(self):
+ """
+ Get current robot position
+ """
+ return self.robot_body.get_position()
+
+ def get_orientation(self):
+ """
+ Return robot orientation
+
+ :return: quaternion in xyzw
+ """
+ return self.robot_body.get_orientation()
+
+ def get_rpy(self):
+ """
+ Return robot orientation in roll, pitch, yaw
+
+ :return: roll, pitch, yaw
+ """
+ return self.robot_body.get_rpy()
+
+ def set_position(self, pos):
+ """
+ Set robot position
+
+ :param pos: robot position
+ """
+ self.robot_body.set_position(pos)
+
+ def set_orientation(self, orn):
+ """
+ Set robot orientation
+
+ :param orn: robot orientation
+ """
+ self.robot_body.set_orientation(orn)
+
+ def set_position_orientation(self, pos, orn):
+ """
+ Set robot position and orientation
+
+ :param pos: robot position
+ :param orn: robot orientation
+ """
+ self.robot_body.set_pose(pos, orn)
+
+ def get_linear_velocity(self):
+ """
+ Get robot base linear velocity
+
+ :return: base linear velocity
+ """
+ return self.robot_body.get_linear_velocity()
+
+ def get_angular_velocity(self):
+ """
+ Get robot base angular velocity
+
+ :return: base angular velocity
+ """
+ return self.robot_body.get_angular_velocity()
+
+ def move_by(self, delta):
+ """
+ Move robot base without physics simulation
+
+ :param delta: delta base position
+ """
+ new_pos = np.array(delta) + self.get_position()
+ self.robot_body.reset_position(new_pos)
+
+ def move_forward(self, forward=0.05):
+ """
+ Move robot base forward without physics simulation
+
+ :param forward: delta base position forward
+ """
+ x, y, z, w = self.robot_body.get_orientation()
+ self.move_by(quat2mat([w, x, y, z]).dot(np.array([forward, 0, 0])))
+
+ def move_backward(self, backward=0.05):
+ """
+ Move robot base backward without physics simulation
+
+ :param backward: delta base position backward
+ """
+ x, y, z, w = self.robot_body.get_orientation()
+ self.move_by(quat2mat([w, x, y, z]).dot(np.array([-backward, 0, 0])))
+
+ def turn_left(self, delta=0.03):
+ """
+ Rotate robot base left without physics simulation
+
+ :param delta: delta angle to rotate the base left
+ """
+ orn = self.robot_body.get_orientation()
+ new_orn = qmult((euler2quat(-delta, 0, 0)), orn)
+ self.robot_body.set_orientation(new_orn)
+
+ def turn_right(self, delta=0.03):
+ """
+ Rotate robot base right without physics simulation
+
+ :param delta: delta angle to rotate the base right
+ """
+ orn = self.robot_body.get_orientation()
+ new_orn = qmult((euler2quat(delta, 0, 0)), orn)
+ self.robot_body.set_orientation(new_orn)
+
+ def keep_still(self):
+ """
+ Keep the robot still. Apply zero velocity to all joints.
+ """
+ for n, j in enumerate(self.ordered_joints):
+ j.set_motor_velocity(0.0)
+
+ def apply_robot_action(self, action):
+ """
+ Apply robot action.
+ Supports joint torque, velocity, and position control, as well as
+ differential drive / twist command control
+
+ :param action: robot action
+ """
+ if self.control == "torque":
+ for n, j in enumerate(self.ordered_joints):
+ j.set_motor_torque(self.torque_coef * j.max_torque * float(np.clip(action[n], -1, +1)))
+ elif self.control == "velocity":
+ for n, j in enumerate(self.ordered_joints):
+ j.set_motor_velocity(self.velocity_coef * j.max_velocity * float(np.clip(action[n], -1, +1)))
+ elif self.control == "position":
+ for n, j in enumerate(self.ordered_joints):
+ j.set_motor_position(action[n])
+ elif self.control == "differential_drive":
+ # assume self.ordered_joints = [left_wheel, right_wheel]
+ assert (
+ action.shape[0] == 2 and len(self.ordered_joints) == 2
+ ), "differential drive requires the first two joints to be two wheels"
+ lin_vel, ang_vel = action
+ if not hasattr(self, "wheel_axle_half") or not hasattr(self, "wheel_radius"):
+ raise Exception(
+ "Trying to use differential drive, but wheel_axle_half and wheel_radius are not specified."
+ )
+ left_wheel_ang_vel = (lin_vel - ang_vel * self.wheel_axle_half) / self.wheel_radius
+ right_wheel_ang_vel = (lin_vel + ang_vel * self.wheel_axle_half) / self.wheel_radius
+ self.ordered_joints[0].set_motor_velocity(left_wheel_ang_vel)
+ self.ordered_joints[1].set_motor_velocity(right_wheel_ang_vel)
+ elif type(self.control) is list or type(self.control) is tuple:
+ # if control is a tuple, set different control type for each joint
+ if "differential_drive" in self.control:
+ # Assume the first two joints are wheels using differential drive control, and the rest use joint control
+ # assume self.ordered_joints = [left_wheel, right_wheel, joint_1, joint_2, ...]
+ assert (
+ action.shape[0] >= 2 and len(self.ordered_joints) >= 2
+ ), "differential drive requires the first two joints to be two wheels"
+ assert (
+ self.control[0] == self.control[1] == "differential_drive"
+ ), "differential drive requires the first two joints to be two wheels"
+ lin_vel, ang_vel = action[:2]
+ if not hasattr(self, "wheel_axle_half") or not hasattr(self, "wheel_radius"):
+ raise Exception(
+ "Trying to use differential drive, but wheel_axle_half and wheel_radius are not specified."
+ )
+ left_wheel_ang_vel = (lin_vel - ang_vel * self.wheel_axle_half) / self.wheel_radius
+ right_wheel_ang_vel = (lin_vel + ang_vel * self.wheel_axle_half) / self.wheel_radius
+ self.ordered_joints[0].set_motor_velocity(left_wheel_ang_vel)
+ self.ordered_joints[1].set_motor_velocity(right_wheel_ang_vel)
+
+ for n, j in enumerate(self.ordered_joints):
+ if self.control[n] == "torque":
+ j.set_motor_torque(self.torque_coef * j.max_torque * float(np.clip(action[n], -1, +1)))
+ elif self.control[n] == "velocity":
+ j.set_motor_velocity(self.velocity_coef * j.max_velocity * float(np.clip(action[n], -1, +1)))
+ elif self.control[n] == "position":
+ j.set_motor_position(action[n])
+ else:
+ raise Exception("unknown control type: {}".format(self.control))
+
+ def policy_action_to_robot_action(self, action):
+ """
+ Scale the policy action (always in [-1, 1]) to robot action based on action range
+
+ :param action: policy action
+ :return: robot action
+ """
+ if self.is_discrete:
+ if isinstance(action, (list, np.ndarray)):
+ assert len(action) == 1 and isinstance(
+ action[0], (np.int64, int)
+ ), "discrete action has incorrect format"
+ action = action[0]
+ real_action = self.action_list[action]
+ else:
+ # self.action_space should always be [-1, 1] for policy training
+ action = np.clip(action, self.action_space.low, self.action_space.high)
+
+ # de-normalize action to the appropriate, robot-specific scale
+ real_action = (self.action_high - self.action_low) / 2.0 * action + (
+ self.action_high + self.action_low
+ ) / 2.0
+ return real_action
+
+ def apply_action(self, action):
+ """
+ Apply policy action
+ """
+ real_action = self.policy_action_to_robot_action(action)
+ self.apply_robot_action(real_action)
+
+ def calc_state(self):
+ """
+ Calculate commonly used proprioceptive states for the robot
+
+ :return: proprioceptive states
+ """
+ j = np.array([j.get_state() for j in self.ordered_joints]).astype(np.float32).flatten()
+
+ jn = np.array([j.get_joint_relative_state() for j in self.ordered_joints]).astype(np.float32).flatten()
+
+ # Get raw joint values and normalized versions
+ self.joint_position = j[0::3]
+ self.joint_velocity = j[1::3]
+ self.joint_torque = j[2::3]
+ self.joint_position_normalized = jn[0::3]
+ self.joint_velocity_normalized = jn[1::3]
+ self.joint_torque_normalized = jn[2::3]
+ self.joint_at_limit = np.count_nonzero(np.abs(self.joint_position_normalized) > 0.99)
+
+ pos = self.get_position()
+ rpy = self.get_rpy()
+
+ # rotate linear and angular velocities to local frame
+ lin_vel = rotate_vector_3d(self.get_linear_velocity(), *rpy)
+ ang_vel = rotate_vector_3d(self.get_angular_velocity(), *rpy)
+
+ state = np.concatenate([pos, rpy, lin_vel, ang_vel, j])
+ return state
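As a sanity check on the differential-drive branch of apply_robot_action, the conversion from a commanded base twist to per-wheel angular velocities can be worked by hand. The sketch below is illustrative only; the wheel constants mirror the Turtlebot model removed later in this patch (wheel radius 0.038 m, axle length 0.23 m, so the half-axle is 0.115 m), and the twist values are made up.

    # Worked example of the differential-drive conversion in apply_robot_action.
    wheel_radius = 0.038
    wheel_axle_half = 0.23 / 2.0

    lin_vel, ang_vel = 0.2, 0.5  # commanded base twist: m/s and rad/s (illustrative values)
    left_wheel_ang_vel = (lin_vel - ang_vel * wheel_axle_half) / wheel_radius
    right_wheel_ang_vel = (lin_vel + ang_vel * wheel_axle_half) / wheel_radius
    print(left_wheel_ang_vel, right_wheel_ang_vel)  # ~3.75 rad/s and ~6.78 rad/s
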
diff --git a/igibson/robots/turtlebot.py b/igibson/robots/turtlebot.py
deleted file mode 100644
index f2761b848..000000000
--- a/igibson/robots/turtlebot.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-from collections import OrderedDict
-
-import gym
-import numpy as np
-
-import igibson
-from igibson.controllers import ControlType
-from igibson.robots.two_wheel_robot import TwoWheelRobot
-from igibson.utils.constants import SemanticClass
-from igibson.utils.python_utils import assert_valid_key
-
-
-class Turtlebot(TwoWheelRobot):
- """
- Turtlebot robot
- Reference: http://wiki.ros.org/Robots/TurtleBot
- Uses joint velocity control
- """
-
- @property
- def wheel_radius(self):
- return 0.038
-
- @property
- def wheel_axle_length(self):
- return 0.23
-
- @property
- def base_control_idx(self):
- """
- :return Array[int]: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
- """
- return np.array([1, 0])
-
- @property
- def default_joint_pos(self):
- return np.zeros(self.n_joints)
-
- @property
- def model_file(self):
- return os.path.join(igibson.assets_path, "models/turtlebot/turtlebot.urdf")
diff --git a/igibson/robots/turtlebot_robot.py b/igibson/robots/turtlebot_robot.py
new file mode 100644
index 000000000..1877edc21
--- /dev/null
+++ b/igibson/robots/turtlebot_robot.py
@@ -0,0 +1,55 @@
+import gym
+import numpy as np
+
+from igibson.robots.robot_locomotor import LocomotorRobot
+
+
+class Turtlebot(LocomotorRobot):
+ """
+ Turtlebot robot
+ Reference: http://wiki.ros.org/Robots/TurtleBot
+ Uses joint velocity control
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.velocity = config.get("velocity", 1.0)
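+ # Maximum wheel velocity command; bounds the continuous action range and is used to build the discrete action set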
+ LocomotorRobot.__init__(
+ self,
+ "turtlebot/turtlebot.urdf",
+ action_dim=2,
+ scale=config.get("robot_scale", 1.0),
+ is_discrete=config.get("is_discrete", False),
+ control="velocity",
+ )
+
+ def set_up_continuous_action_space(self):
+ """
+ Set up continuous action space
+ """
+ self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
+ self.action_high = np.full(shape=self.action_dim, fill_value=self.velocity)
+ self.action_low = -self.action_high
+
+ def set_up_discrete_action_space(self):
+ """
+ Set up discrete action space
+ """
+ self.action_list = [
+ [self.velocity, self.velocity],
+ [-self.velocity, -self.velocity],
+ [self.velocity * 0.5, -self.velocity * 0.5],
+ [-self.velocity * 0.5, self.velocity * 0.5],
+ [0, 0],
+ ]
+ self.action_space = gym.spaces.Discrete(len(self.action_list))
+ self.setup_keys_to_action()
+
+ def setup_keys_to_action(self):
+ self.keys_to_action = {
+ (ord("w"),): 0, # forward
+ (ord("s"),): 1, # backward
+ (ord("d"),): 2, # turn right
+ (ord("a"),): 3, # turn left
+ (): 4, # stay still
+ }
diff --git a/igibson/robots/two_wheel_robot.py b/igibson/robots/two_wheel_robot.py
deleted file mode 100644
index c0c98f96d..000000000
--- a/igibson/robots/two_wheel_robot.py
+++ /dev/null
@@ -1,144 +0,0 @@
-from abc import abstractmethod
-
-import gym
-import numpy as np
-from transforms3d.euler import euler2quat
-from transforms3d.quaternions import qmult, quat2mat
-
-from igibson.controllers import LocomotionController
-from igibson.robots.locomotion_robot import LocomotionRobot
-from igibson.utils.constants import SemanticClass
-from igibson.utils.python_utils import assert_valid_key
-
-
-class TwoWheelRobot(LocomotionRobot):
- """
- Robot that is is equipped with locomotive (navigational) capabilities, as defined by two wheels that can be used
- for differential drive (e.g.: Turtlebot).
- Provides common interface for a wide variety of robots.
-
- NOTE: controller_config should, at the minimum, contain:
- base: controller specifications for the controller to control this robot's base (locomotion).
- Should include:
-
- - name: Controller to create
- - relevant to the controller being created. Note that all values will have default
- values specified, but setting these individual kwargs will override them
- """
-
- def _validate_configuration(self):
- # Make sure base only has two indices (i.e.: two wheels for differential drive)
- assert len(self.base_control_idx) == 2, "Differential drive can only be used with robot with two base joints!"
-
- # run super
- super()._validate_configuration()
-
- def _create_discrete_action_space(self):
- # Set action list based on controller (joint or DD) used
-
- # We set straight velocity to be 50% of max velocity for the wheels
- max_wheel_joint_vels = self.control_limits["velocity"][1][self.base_control_idx]
- assert len(max_wheel_joint_vels) == 2, "TwoWheelRobot must only have two base (wheel) joints!"
- assert max_wheel_joint_vels[0] == max_wheel_joint_vels[1], "Both wheels must have the same max speed!"
- wheel_straight_vel = 0.5 * max_wheel_joint_vels[0]
- wheel_rotate_vel = 0.5
- if self.controller_config["base"]["name"] == "JointController":
- action_list = [
- [wheel_straight_vel, wheel_straight_vel],
- [-wheel_straight_vel, -wheel_straight_vel],
- [wheel_rotate_vel, -wheel_rotate_vel],
- [-wheel_rotate_vel, wheel_rotate_vel],
- [0, 0],
- ]
- else:
- # DifferentialDriveController
- lin_vel = wheel_straight_vel * self.wheel_radius
- ang_vel = wheel_rotate_vel * self.wheel_radius * 2.0 / self.wheel_axle_length
- action_list = [
- [lin_vel, 0],
- [-lin_vel, 0],
- [0, ang_vel],
- [0, -ang_vel],
- [0, 0],
- ]
-
- self.action_list = action_list
-
- # Return this action space
- return gym.spaces.Box(len(self.action_list))
-
- def _get_proprioception_dict(self):
- dic = super()._get_proprioception_dict()
-
- # Grab wheel joint velocity info
- joints = list(self._joints.values())
- wheel_joints = [joints[idx] for idx in self.base_control_idx]
- l_vel, r_vel = [jnt.get_state()[1] for jnt in wheel_joints]
-
- # Compute linear and angular velocities
- lin_vel = (l_vel + r_vel) / 2.0 * self.wheel_radius
- ang_vel = (r_vel - l_vel) / self.wheel_axle_length
-
- # Add info
- dic["dd_base_lin_vel"] = np.array([lin_vel])
- dic["dd_base_ang_vel"] = np.array([ang_vel])
-
- return dic
-
- @property
- def default_proprio_obs(self):
- obs_keys = super().default_proprio_obs
- return obs_keys + ["dd_base_lin_vel", "dd_base_ang_vel"]
-
- @property
- def _default_controllers(self):
- # Always call super first
- controllers = super()._default_controllers
-
- # Use DifferentialDrive as default
- controllers["base"] = "DifferentialDriveController"
-
- return controllers
-
- @property
- def _default_base_differential_drive_controller_config(self):
- """
- :return: Dict[str, Any] Default differential drive controller config to
- control this robot's base.
- """
- return {
- "name": "DifferentialDriveController",
- "control_freq": self.control_freq,
- "wheel_radius": self.wheel_radius,
- "wheel_axle_length": self.wheel_axle_length,
- "control_limits": self.control_limits,
- "joint_idx": self.base_control_idx,
- }
-
- @property
- def _default_controller_config(self):
- # Always run super method first
- cfg = super()._default_controller_config
-
- # Add differential drive option to base
- cfg["base"][
- self._default_base_differential_drive_controller_config["name"]
- ] = self._default_base_differential_drive_controller_config
-
- return cfg
-
- @property
- @abstractmethod
- def wheel_radius(self):
- """
- :return: float, radius of each wheel at the base, in metric units
- """
- raise NotImplementedError
-
- @property
- @abstractmethod
- def wheel_axle_length(self):
- """
- :return: float, perpendicular distance between the robot's two wheels, in metric units
- """
- raise NotImplementedError
diff --git a/igibson/scenes/empty_scene.py b/igibson/scenes/empty_scene.py
index b21b5c517..5fba59656 100644
--- a/igibson/scenes/empty_scene.py
+++ b/igibson/scenes/empty_scene.py
@@ -6,20 +6,17 @@
import pybullet_data
from igibson.scenes.scene_base import Scene
-from igibson.utils.constants import SemanticClass
from igibson.utils.utils import l2_distance
class EmptyScene(Scene):
"""
- An empty scene for debugging.
+ An empty scene for debugging
"""
- def __init__(self, render_floor_plane=False, floor_plane_rgba=[1.0, 1.0, 1.0, 1.0]):
+ def __init__(self):
super(EmptyScene, self).__init__()
self.objects = []
- self.render_floor_plane = render_floor_plane
- self.floor_plane_rgba = floor_plane_rgba
def get_objects(self):
return list(self.objects)
@@ -27,26 +24,23 @@ def get_objects(self):
def _add_object(self, obj):
self.objects.append(obj)
- def _load(self, simulator):
+ def _load(self):
+ """
+ Load the scene into pybullet
+ """
plane_file = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
self.floor_body_ids += [p.loadMJCF(plane_file)[0]]
p.changeDynamics(self.floor_body_ids[0], -1, lateralFriction=1)
- # White floor plane for visualization purpose if needed.
- p.changeVisualShape(self.floor_body_ids[0], -1, rgbaColor=self.floor_plane_rgba)
-
- if self.render_floor_plane:
- for id in self.floor_body_ids:
- simulator.load_object_in_renderer(
- None, id, SemanticClass.SCENE_OBJS, use_pbr=False, use_pbr_mapping=False
- )
+ # white floor plane for visualization purpose if needed
+ p.changeVisualShape(self.floor_body_ids[0], -1, rgbaColor=[1, 1, 1, 1])
- # Load additional objects & merge body IDs.
- additional_object_body_ids = [x for obj in self.objects for x in obj.load(simulator)]
+ # Load additional objects & merge body IDs
+ additional_object_body_ids = [x for obj in self.objects for x in obj.load()]
return self.floor_body_ids + additional_object_body_ids
def get_random_point(self, floor=None):
"""
- Get a random point in the region of [-5, 5] x [-5, 5].
+ Get a random point in the region of [-5, 5] x [-5, 5]
"""
return floor, np.array(
[
@@ -58,7 +52,7 @@ def get_random_point(self, floor=None):
def get_shortest_path(self, floor, source_world, target_world, entire_path=False):
"""
- Get a trivial shortest path because the scene is empty.
+ Get a trivial shortest path because the scene is empty
"""
logging.warning("WARNING: trying to compute the shortest path in EmptyScene (assuming empty space)")
shortest_path = np.stack((source_world, target_world))
diff --git a/igibson/scenes/gibson_indoor_scene.py b/igibson/scenes/gibson_indoor_scene.py
index 6283cb3e4..ad3d405cb 100644
--- a/igibson/scenes/gibson_indoor_scene.py
+++ b/igibson/scenes/gibson_indoor_scene.py
@@ -7,7 +7,6 @@
from igibson.scenes.indoor_scene import IndoorScene
from igibson.utils.assets_utils import get_scene_path, get_texture_file
-from igibson.utils.constants import SemanticClass
class StaticIndoorScene(IndoorScene):
@@ -25,8 +24,7 @@ def __init__(
build_graph=True,
num_waypoints=10,
waypoint_resolution=0.2,
- pybullet_load_texture=True,
- render_floor_plane=False,
+ pybullet_load_texture=False,
):
"""
Load a building scene and compute traversability
@@ -39,7 +37,6 @@ def __init__(
:param num_waypoints: number of way points returned
:param waypoint_resolution: resolution of adjacent way points
:param pybullet_load_texture: whether to load texture into pybullet. This is for debugging purpose only and does not affect robot's observations
- :param render_floor_plane: whether to render the additionally added floor planes
"""
super(StaticIndoorScene, self).__init__(
scene_id,
@@ -49,11 +46,9 @@ def __init__(
build_graph,
num_waypoints,
waypoint_resolution,
+ pybullet_load_texture,
)
- logging.info("StaticIndoorScene scene: {}".format(scene_id))
- self.pybullet_load_texture = pybullet_load_texture
- self.render_floor_plane = render_floor_plane
-
+ # logging.info("StaticIndoorScene scene: {}".format(scene_id))
self.objects = []
def load_floor_metadata(self):
@@ -67,7 +62,7 @@ def load_floor_metadata(self):
self.floor_heights = sorted(list(map(float, f.readlines())))
logging.debug("Floors {}".format(self.floor_heights))
- def load_scene_mesh(self, simulator):
+ def load_scene_mesh(self):
"""
Load scene mesh
"""
@@ -76,7 +71,7 @@ def load_scene_mesh(self, simulator):
filename = os.path.join(get_scene_path(self.scene_id), "mesh_z_up.obj")
collision_id = p.createCollisionShape(p.GEOM_MESH, fileName=filename, flags=p.GEOM_FORCE_CONCAVE_TRIMESH)
- if simulator.use_pb_gui and self.pybullet_load_texture:
+ if self.pybullet_load_texture:
visual_id = p.createVisualShape(p.GEOM_MESH, fileName=filename)
else:
visual_id = -1
@@ -84,7 +79,7 @@ def load_scene_mesh(self, simulator):
self.mesh_body_id = p.createMultiBody(baseCollisionShapeIndex=collision_id, baseVisualShapeIndex=visual_id)
p.changeDynamics(self.mesh_body_id, -1, lateralFriction=1)
- if simulator.use_pb_gui and self.pybullet_load_texture:
+ if self.pybullet_load_texture:
texture_filename = get_texture_file(filename)
if texture_filename is not None:
texture_id = p.loadTexture(texture_filename)
@@ -101,26 +96,17 @@ def load_floor_planes(self):
p.setCollisionFilterPair(self.mesh_body_id, floor_body_id, -1, -1, enableCollision=0)
self.floor_body_ids.append(floor_body_id)
- def _load(self, simulator):
+ def _load(self):
"""
Load the scene (including scene mesh and floor plane) into pybullet
"""
self.load_floor_metadata()
- self.load_scene_mesh(simulator)
+ self.load_scene_mesh()
self.load_floor_planes()
self.load_trav_map(get_scene_path(self.scene_id))
- simulator.load_object_in_renderer(
- None, self.mesh_body_id, SemanticClass.SCENE_OBJS, use_pbr=False, use_pbr_mapping=False
- )
- if self.render_floor_plane:
- for body_id in self.floor_body_ids:
- simulator.load_object_in_renderer(
- None, body_id, SemanticClass.SCENE_OBJS, use_pbr=False, use_pbr_mapping=False
- )
-
- additional_object_body_ids = [x for obj in self.objects for x in obj.load(simulator)]
+ additional_object_body_ids = [x for obj in self.objects for x in obj.load()]
return [self.mesh_body_id] + self.floor_body_ids + additional_object_body_ids
def get_objects(self):
diff --git a/igibson/scenes/igibson_indoor_scene.py b/igibson/scenes/igibson_indoor_scene.py
index 070184c0f..c55fec4b5 100644
--- a/igibson/scenes/igibson_indoor_scene.py
+++ b/igibson/scenes/igibson_indoor_scene.py
@@ -17,7 +17,6 @@
from igibson.object_states.object_state_base import AbsoluteObjectState
from igibson.objects.articulated_object import URDFObject
from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer
-from igibson.robots.behavior_robot import BehaviorRobot
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.utils.assets_utils import (
get_3dfront_scene_path,
@@ -28,7 +27,7 @@
get_ig_model_path,
get_ig_scene_path,
)
-from igibson.utils.utils import restoreState, rotate_vector_3d
+from igibson.utils.utils import rotate_vector_3d
SCENE_SOURCE = ["IG", "CUBICASA", "THREEDFRONT"]
@@ -52,6 +51,7 @@ def __init__(
build_graph=True,
num_waypoints=10,
waypoint_resolution=0.2,
+ pybullet_load_texture=False,
texture_randomization=False,
link_collision_tolerance=0.03,
object_randomization=False,
@@ -65,7 +65,6 @@ def __init__(
scene_source="IG",
merge_fixed_links=True,
ignore_visual_shape=False,
- rendering_params=None,
):
"""
:param scene_id: Scene id
@@ -76,6 +75,7 @@ def __init__(
:param build_graph: build connectivity graph
:param num_waypoints: number of way points returned
:param waypoint_resolution: resolution of adjacent way points
+ :param pybullet_load_texture: whether to load texture into pybullet. This is for debugging purpose only and does not affect robot's observations
:param texture_randomization: whether to randomize material/texture
:param link_collision_tolerance: tolerance of the percentage of links that cannot be fully extended after object randomization
:param object_randomization: whether to randomize object
@@ -87,7 +87,6 @@ def __init__(
:param load_room_instances: only load objects in these room instances into the scene (a list of str)
:param seg_map_resolution: room segmentation map resolution
:param scene_source: source of scene data; among IG, CUBICASA, THREEDFRONT
- :param rendering_params: additional rendering params to be passed into object initializers (e.g. texture scale)
"""
super(InteractiveIndoorScene, self).__init__(
@@ -98,6 +97,7 @@ def __init__(
build_graph,
num_waypoints,
waypoint_resolution,
+ pybullet_load_texture,
)
self.texture_randomization = texture_randomization
self.object_randomization = object_randomization
@@ -123,9 +123,7 @@ def __init__(
scene_dir = get_3dfront_scene_path(scene_id)
self.scene_source = scene_source
self.scene_dir = scene_dir
- self.fname = fname
self.scene_file = os.path.join(scene_dir, "urdf", "{}.urdf".format(fname))
- logging.info("Loading scene URDF: {}".format(self.scene_file))
self.scene_tree = ET.parse(self.scene_file)
self.random_groups = {}
self.objects_by_category = {}
@@ -274,7 +272,6 @@ def __init__(
for joint in self.scene_tree.findall("joint")
if joint.find("child").attrib["link"] == object_name
][0]
-
bddl_object_scope = link.attrib.get("object_scope", None)
joint_positions = (
json.loads(link.attrib["joint_positions"]) if "joint_positions" in link.keys() else None
@@ -296,7 +293,6 @@ def __init__(
joint_positions=joint_positions,
merge_fixed_links=self.merge_fixed_links,
ignore_visual_shape=ignore_visual_shape,
- rendering_params=rendering_params,
)
# Load object states.
@@ -314,7 +310,7 @@ def __init__(
self.object_groupers[link.attrib["grouper"]]["object_parts"] = []
self.object_groupers[link.attrib["grouper"]]["object_parts"].append(obj)
else:
- self.add_object(obj, simulator=None)
+ self.add_object(obj)
elif link.attrib["name"] != "world":
logging.error("iGSDF should only contain links that represent embedded URDF objects")
@@ -328,12 +324,20 @@ def __init__(
pose_offsets = grouper["pose_offsets"]
grouped_obj_parts = ObjectGrouper(list(zip(object_parts, pose_offsets)))
obj = ObjectMultiplexer(multiplexer, [whole_object, grouped_obj_parts], current_index)
- self.add_object(obj, simulator=None)
+ self.add_object(obj)
def set_ignore_visual_shape(self, value):
self.ignore_visual_shape = value
for obj in self.get_objects():
- obj.set_ignore_visual_shape(value)
+ # NOTE: (njk) need to ignore agent parts here to avoid a crash,
+ # which happens because we load scenes in predicators with
+ # already-existing agents
+ try:
+ if obj.name not in ["left_hand_1", "right_hand_1", "BRBody_1", "BREye_1"]:
+ obj.set_ignore_visual_shape(value)
+ except AttributeError:
+ # Objects that do not implement set_ignore_visual_shape (e.g. pre-loaded agent parts) are simply skipped
+ pass
def get_objects(self):
return list(self.objects_by_name.values())
@@ -476,7 +480,7 @@ def remove_object(self, obj):
for id in obj.body_ids:
del self.objects_by_id[id]
else:
- del self.objects_by_id[obj.get_body_id()]
+ del self.objects_by_id[obj.body_id]
def _add_object(self, obj):
"""
@@ -484,15 +488,6 @@ def _add_object(self, obj):
:param obj: Object instance to add to scene.
"""
- # TODO: Remove this hack.
- # BehaviorRobot parts all go into the scene as agent objects. To retain backwards compatibility, we special case
- # this here. This logic is to be removed once BehaviorRobot is unified into BaseRobot.
- if isinstance(obj, BehaviorRobot):
- for part in obj.links.values():
- assert part._loaded, "BehaviorRobot parts need to be pre-loaded."
- self._add_object(part)
- return
-
if hasattr(obj, "category"):
category = obj.category
else:
@@ -532,8 +527,8 @@ def _add_object(self, obj):
for id in obj.body_ids:
self.objects_by_id[id] = obj
else:
- if obj.get_body_id() is not None:
- self.objects_by_id[obj.get_body_id()] = obj
+ if obj.body_id is not None:
+ self.objects_by_id[obj.body_id] = obj
def randomize_texture(self):
"""
@@ -584,8 +579,9 @@ def check_scene_quality(self, body_ids, fixed_body_ids):
# build mapping from body_id to object name for debugging
body_id_to_name = {}
for name in self.objects_by_name:
- for body_id in self.objects_by_name[name].body_ids:
- body_id_to_name[body_id] = name
+ if self.objects_by_name[name].name not in ["left_hand_1", "right_hand_1", "BRBody_1", "BREye_1"]:
+ for body_id in self.objects_by_name[name].body_ids:
+ body_id_to_name[body_id] = name
self.body_id_to_name = body_id_to_name
# collect body ids for overlapped bboxes (e.g. tables and chairs,
@@ -637,21 +633,21 @@ def check_scene_quality(self, body_ids, fixed_body_ids):
j_high_perc = j_range * 0.66 + j_low
# check if j_default has collision
- restoreState(state_id)
+ p.restoreState(state_id)
p.resetJointState(body_id, joint_id, j_default)
p.stepSimulation()
has_collision = self.check_collision(body_a=body_id, link_a=joint_id, fixed_body_ids=fixed_body_ids)
joint_quality = joint_quality and (not has_collision)
# check if j_low_perc has collision
- restoreState(state_id)
+ p.restoreState(state_id)
p.resetJointState(body_id, joint_id, j_low_perc)
p.stepSimulation()
has_collision = self.check_collision(body_a=body_id, link_a=joint_id, fixed_body_ids=fixed_body_ids)
joint_quality = joint_quality and (not has_collision)
# check if j_high_perc has collision
- restoreState(state_id)
+ p.restoreState(state_id)
p.resetJointState(body_id, joint_id, j_high_perc)
p.stepSimulation()
has_collision = self.check_collision(body_a=body_id, link_a=joint_id, fixed_body_ids=fixed_body_ids)
@@ -664,29 +660,29 @@ def check_scene_quality(self, body_ids, fixed_body_ids):
quality_check = quality_check and (joint_collision_so_far <= joint_collision_allowed)
# restore state to the initial state before testing collision
- restoreState(state_id)
+ p.restoreState(state_id)
p.removeState(state_id)
self.quality_check = quality_check
self.body_collision_set = set()
for body_a, body_b in body_body_collision:
- logging.warning(
- "scene quality check: {} and {} has collision.".format(
- body_id_to_name[body_a],
- body_id_to_name[body_b],
- )
- )
+ # logging.warning(
+ # "scene quality check: {} and {} has collision.".format(
+ # body_id_to_name[body_a],
+ # body_id_to_name[body_b],
+ # )
+ # )
self.body_collision_set.add(body_id_to_name[body_a])
self.body_collision_set.add(body_id_to_name[body_b])
self.link_collision_set = set()
for body_id in body_link_collision:
- logging.warning(
- "scene quality check: {} has joint that cannot extend for >66%.".format(
- body_id_to_name[body_id],
- )
- )
+ # logging.warning(
+ # "scene quality check: {} has joint that cannot extend for >66%.".format(
+ # body_id_to_name[body_id],
+ # )
+ # )
self.link_collision_set.add(body_id_to_name[body_id])
return self.quality_check
@@ -755,7 +751,7 @@ def open_one_obj(self, body_id, mode="random"):
p.resetJointState(body_id, joint_id, j_high - j_pos)
p.stepSimulation()
has_collision = self.check_collision(body_a=body_id, link_a=joint_id)
- restoreState(state_id)
+ p.restoreState(state_id)
if not has_collision:
p.resetJointState(body_id, joint_id, j_high - j_pos)
break
@@ -769,7 +765,7 @@ def open_one_obj(self, body_id, mode="random"):
p.resetJointState(body_id, joint_id, j_pos)
p.stepSimulation()
has_collision = self.check_collision(body_a=body_id, link_a=joint_id)
- restoreState(state_id)
+ p.restoreState(state_id)
if not has_collision:
p.resetJointState(body_id, joint_id, j_pos)
reset_success = True
@@ -827,20 +823,33 @@ def open_all_doors(self):
"""
return self.open_all_objs_by_category("door", mode="max")
- def _load(self, simulator):
+ def _load(self):
"""
Load all scene objects into pybullet
"""
# Load all the objects
body_ids = []
fixed_body_ids = []
+ visual_mesh_to_material = []
+ link_name_to_vm = []
for int_object in self.objects_by_name:
obj = self.objects_by_name[int_object]
- new_ids = obj.load(simulator)
+ new_ids = obj.load()
+ if isinstance(new_ids, int):
+ new_ids = [new_ids]
for id in new_ids:
self.objects_by_id[id] = obj
- body_ids += new_ids
- fixed_body_ids += [body_id for body_id, is_fixed in zip(obj.body_ids, obj.is_fixed) if is_fixed]
+ # NOTE: (njk) need to ignore agent parts here to avoid a crash,
+ # which happens because we load scenes in predicators with
+ # already-existing agents
+ if obj.name not in ["left_hand_1", "right_hand_1", "BRBody_1", "BREye_1"]:
+ body_ids += new_ids
+ visual_mesh_to_material += obj.visual_mesh_to_material
+ link_name_to_vm += obj.link_name_to_vm
+ fixed_body_ids += [body_id for body_id, is_fixed in zip(obj.body_ids, obj.is_fixed) if is_fixed]
+ assert len(visual_mesh_to_material) == len(body_ids)
+ assert len(link_name_to_vm) == len(body_ids)
# disable collision between the fixed links of the fixed objects
for i in range(len(fixed_body_ids)):
@@ -854,6 +863,8 @@ def _load(self, simulator):
if self.build_graph:
self.load_trav_map(maps_path)
+ self.visual_mesh_to_material = visual_mesh_to_material
+ self.link_name_to_vm = link_name_to_vm
self.check_scene_quality(body_ids, fixed_body_ids)
# force wake up each body once
@@ -1000,7 +1011,7 @@ def get_room_instance_by_point(self, xy):
"""
x, y = self.world_to_seg_map(xy)
- if x >= self.room_ins_map.shape[0] or y >= self.room_ins_map.shape[1]:
+ if x > self.room_ins_map.shape[0] or y > self.room_ins_map.shape[1]:
return None
ins_id = self.room_ins_map[x, y]
# room boundary
@@ -1017,14 +1028,8 @@ def get_body_ids(self):
"""
ids = []
for obj_name in self.objects_by_name:
- # TODO: Remove URDFObject-specific logic
- if (
- hasattr(self.objects_by_name[obj_name], "body_ids")
- and self.objects_by_name[obj_name].body_ids is not None
- ):
- ids.extend(self.objects_by_name[obj_name].body_ids)
- elif self.objects_by_name[obj_name].get_body_id() is not None:
- ids.append(self.objects_by_name[obj_name].get_body_id())
+ if self.objects_by_name[obj_name].body_id is not None:
+ ids.extend(self.objects_by_name[obj_name].body_id)
return ids
def save_obj_or_multiplexer(self, obj, tree_root, additional_attribs_by_name):
@@ -1079,7 +1084,10 @@ def save_obj(self, obj, tree_root, additional_attribs_by_name):
link = tree_root.find('link[@name="{}"]'.format(name))
# Convert from center of mass to base link position
- body_id = obj.get_body_id()
+ if hasattr(obj, "body_ids"):
+ body_id = obj.body_ids[obj.main_body]
+ else:
+ body_id = obj.body_id
dynamics_info = p.getDynamicsInfo(body_id, -1)
inertial_pos = dynamics_info[3]
@@ -1162,7 +1170,7 @@ def save_obj(self, obj, tree_root, additional_attribs_by_name):
# Common logic for objects that are both in the scene & otherwise.
# Add joints
- body_ids = obj.body_ids if hasattr(obj, "body_ids") else [obj.get_body_id()]
+ body_ids = obj.body_ids if hasattr(obj, "body_ids") else [obj.body_id]
joint_data = []
for bid in body_ids:
this_joint_data = {}
diff --git a/igibson/scenes/indoor_scene.py b/igibson/scenes/indoor_scene.py
index df019b8d3..c10f7fb4d 100644
--- a/igibson/scenes/indoor_scene.py
+++ b/igibson/scenes/indoor_scene.py
@@ -29,6 +29,7 @@ def __init__(
build_graph=True,
num_waypoints=10,
waypoint_resolution=0.2,
+ pybullet_load_texture=False,
):
"""
Load an indoor scene and compute traversability
@@ -40,9 +41,10 @@ def __init__(
:param build_graph: build connectivity graph
:param num_waypoints: number of way points returned
:param waypoint_resolution: resolution of adjacent way points
+ :param pybullet_load_texture: whether to load texture into pybullet. This is for debugging purpose only and does not affect robot's observations
"""
super(IndoorScene, self).__init__()
- logging.info("IndoorScene model: {}".format(scene_id))
+ # logging.info("IndoorScene model: {}".format(scene_id))
self.scene_id = scene_id
self.trav_map_default_resolution = 0.01 # each pixel represents 0.01m
self.trav_map_resolution = trav_map_resolution
@@ -54,6 +56,7 @@ def __init__(
self.num_waypoints = num_waypoints
self.waypoint_interval = int(waypoint_resolution / trav_map_resolution)
self.mesh_body_id = None
+ self.pybullet_load_texture = pybullet_load_texture
self.floor_heights = [0.0]
def load_trav_map(self, maps_path):
@@ -104,11 +107,11 @@ def build_trav_graph(self, maps_path, floor, trav_map):
maps_path, "floor_trav_{}_py{}{}.p".format(floor, sys.version_info.major, sys.version_info.minor)
)
if os.path.isfile(graph_file):
- logging.info("Loading traversable graph")
+ # logging.info("Loading traversable graph")
with open(graph_file, "rb") as pfile:
g = pickle.load(pfile)
else:
- logging.info("Building traversable graph")
+ # logging.info("Building traversable graph")
g = nx.Graph()
for i in range(self.trav_map_size):
for j in range(self.trav_map_size):
@@ -138,7 +141,7 @@ def build_trav_graph(self, maps_path, floor, trav_map):
for node in g.nodes:
trav_map[node[0], node[1]] = 255
- def get_random_point(self, floor=None):
+ def get_random_point(self, floor=None, rng=None):
"""
Sample a random point on the given floor number. If not given, sample a random floor number.
@@ -150,7 +153,10 @@ def get_random_point(self, floor=None):
floor = self.get_random_floor()
trav = self.floor_map[floor]
trav_space = np.where(trav == 255)
- idx = np.random.randint(0, high=trav_space[0].shape[0])
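+ # rng, if provided, is assumed to be a numpy Generator (e.g. np.random.default_rng()) for reproducible sampling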
+ if rng is None:
+ idx = np.random.randint(0, high=trav_space[0].shape[0])
+ else:
+ idx = rng.integers(low=0, high=trav_space[0].shape[0])
xy_map = np.array([trav_space[0][idx], trav_space[1][idx]])
x, y = self.map_to_world(xy_map)
z = self.floor_heights[floor]
diff --git a/igibson/scenes/scene_base.py b/igibson/scenes/scene_base.py
index 263feaf68..cc283bd55 100644
--- a/igibson/scenes/scene_base.py
+++ b/igibson/scenes/scene_base.py
@@ -8,8 +8,8 @@
class Scene(with_metaclass(ABCMeta)):
"""
- Base class for all Scene objects.
- Contains the base functionalities and the functions that all derived classes need to implement.
+ Base class for all Scene objects
+ Contains the base functionalities and the functions that all derived classes need to implement
"""
def __init__(self):
@@ -18,47 +18,33 @@ def __init__(self):
self.floor_body_ids = [] # List of ids of the floor_heights
@abstractmethod
- def _load(self, simulator):
+ def _load(self):
"""
- Load the scene into simulator (pybullet and renderer).
- The elements to load may include: floor, building, objects, etc.
+ Load the scene into pybullet
+ The elements to load may include: floor, building, objects, etc
- :param simulator: the simulator to load the scene into
- :return: a list of pybullet ids of elements composing the scene, including floors, buildings and objects
+ :return: A list of pybullet ids of elements composing the scene, including floors, buildings and objects
"""
raise NotImplementedError()
- def load(self, simulator):
+ def load(self):
"""
- Load the scene into simulator (pybullet and renderer).
- The elements to load may include: floor, building, objects, etc.
+ Load the scene into pybullet
+ The elements to load may include: floor, building, objects, etc
- :param simulator: the simulator to load the scene into
- :return: a list of pybullet ids of elements composing the scene, including floors, buildings and objects
+ :return: A list of pybullet ids of elements composing the scene, including floors, buildings and objects
"""
# Do not override this function. Override _load instead.
if self.loaded:
raise ValueError("This scene is already loaded.")
-
self.loaded = True
- return self._load(simulator)
+ return self._load()
@abstractmethod
def get_objects(self):
- """
- Get the objects in the scene.
-
- :return: a list of objects
- """
raise NotImplementedError()
def get_objects_with_state(self, state):
- """
- Get the objects with a given state in the scene.
-
- :param state: state of the objects to get
- :return: a list of objects with the given state
- """
return [item for item in self.get_objects() if hasattr(item, "states") and state in item.states]
@abstractmethod
@@ -69,23 +55,22 @@ def _add_object(self, obj):
Note that if the scene is not loaded, it should load this added object alongside its other objects when
scene.load() is called. The object should also be accessible through scene.get_objects().
- :param obj: the object to load
+ :param obj: The object to load.
"""
raise NotImplementedError()
- def add_object(self, obj, simulator, _is_call_from_simulator=False):
+ def add_object(self, obj, _is_call_from_simulator=False):
"""
Add an object to the scene, loading it if the scene is already loaded.
Note that calling add_object to an already loaded scene should only be done by the simulator's import_object()
function.
- :param obj: the object to load
- :param simulator: the simulator to add the object to
- :param _is_call_from_simulator: whether the caller is the simulator. This should
- **not** be set by any callers that are not the Simulator class
- :return: the body ID(s) of the loaded object if the scene was already loaded, or None if the scene is not loaded
- (in that case, the object is stored to be loaded together with the scene)
+ :param obj: The object to load.
+ :param _is_call_from_simulator: Bool indicating if the caller is the simulator. This should
+ **not** be set by any callers that are not the Simulator class.
+ :return: The body ID(s) of the loaded object if the scene was already loaded, or None if the scene is not loaded
+ (in that case, the object is stored to be loaded together with the scene).
"""
if self.loaded and not _is_call_from_simulator:
raise ValueError("To add an object to an already-loaded scene, use the Simulator's import_object function.")
@@ -95,50 +80,51 @@ def add_object(self, obj, simulator, _is_call_from_simulator=False):
# If the scene is already loaded, we need to load this object separately. Otherwise, don't do anything now,
# let scene._load() load the object when called later on.
- body_ids = None
+ body_id_or_ids = None
if self.loaded:
- body_ids = obj.load(simulator)
+ body_id_or_ids = obj.load()
self._add_object(obj)
- return body_ids
+ return body_id_or_ids
def get_random_floor(self):
"""
- Sample a random floor among all existing floor_heights in the scene.
+ Sample a random floor among all existing floor_heights in the scene
While Gibson v1 scenes can have several floor_heights, the EmptyScene, StadiumScene and scenes from iGibson
- have only a single floor.
+ have only a single floor
- :return: an integer between 0 and NumberOfFloors-1
+ :return: An integer between 0 and NumberOfFloors-1
"""
return 0
def get_random_point(self, floor=None):
"""
- Sample a random valid location in the given floor.
+ Sample a random valid location in the given floor
:param floor: integer indicating the floor, or None if randomly sampled
- :return: a tuple of random floor and random valid point (3D) in that floor
+ :return: A tuple of random floor and random valid point (3D) in that floor
"""
raise NotImplementedError()
def get_shortest_path(self, floor, source_world, target_world, entire_path=False):
"""
- Query the shortest path between two points in the given floor.
+ Query the shortest path between two points in the given floor
- :param floor: floor to compute shortest path in
- :param source_world: initial location in world reference frame
- :param target_world: target location in world reference frame
- :param entire_path: flag indicating if the function should return the entire shortest path or not
- :return: a tuple of path (if indicated) as a list of points, and geodesic distance (lenght of the path)
+ :param floor: Floor to compute shortest path in
+ :param source_world: Initial location in world reference frame
+ :param target_world: Target location in world reference frame
+ :param entire_path: Flag indicating if the function should return the entire shortest path or not
+ :return: Tuple of path (if indicated) as a list of points, and geodesic distance (length of the path)
"""
raise NotImplementedError()
def get_floor_height(self, floor=0):
"""
- Get the height of the given floor.
+ Get the height of the given floor
- :param floor: an integer identifying the floor
- :return: height of the given floor
+ :param floor: Integer identifying the floor
+ :return: Height of the given floor
"""
+ del floor
return 0.0
diff --git a/igibson/scenes/stadium_scene.py b/igibson/scenes/stadium_scene.py
index 45422f766..c99bc275f 100644
--- a/igibson/scenes/stadium_scene.py
+++ b/igibson/scenes/stadium_scene.py
@@ -6,7 +6,6 @@
import pybullet_data
from igibson.scenes.scene_base import Scene
-from igibson.utils.constants import SemanticClass
from igibson.utils.utils import l2_distance
@@ -19,7 +18,7 @@ def __init__(self):
super(StadiumScene, self).__init__()
self.objects = []
- def _load(self, simulator):
+ def _load(self):
"""
Load the scene into pybullet
"""
@@ -31,12 +30,8 @@ def _load(self, simulator):
p.resetBasePositionAndOrientation(self.floor_body_ids[0], [pos[0], pos[1], pos[2] - 0.005], orn)
p.changeVisualShape(self.floor_body_ids[0], -1, rgbaColor=[1, 1, 1, 0.5])
- # Only load the stadium mesh into the renderer, not the extra floor plane
- for id in list(self.stadium):
- simulator.load_object_in_renderer(None, id, SemanticClass.SCENE_OBJS, use_pbr=False, use_pbr_mapping=False)
-
# Load additional objects & merge body IDs
- additional_object_body_ids = [x for obj in self.objects for x in obj.load(simulator)]
+ additional_object_body_ids = [x for obj in self.objects for x in obj.load()]
return list(self.stadium) + self.floor_body_ids + additional_object_body_ids
def get_random_point(self, floor=None):
diff --git a/igibson/sensors/scan_sensor.py b/igibson/sensors/scan_sensor.py
index ebe519ab7..7fb392121 100644
--- a/igibson/sensors/scan_sensor.py
+++ b/igibson/sensors/scan_sensor.py
@@ -28,10 +28,8 @@ def __init__(self, env, modalities):
self.noise_model.set_noise_rate(self.scan_noise_rate)
self.noise_model.set_noise_value(1.0)
- self.laser_position, self.laser_orientation = (
- env.robots[0].links[self.laser_link_name].get_position_orientation()
- )
- self.base_position, self.base_orientation = env.robots[0].base_link.get_position_orientation()
+ self.laser_pose = env.robots[0].parts[self.laser_link_name].get_pose()
+ self.base_pose = env.robots[0].parts["base_link"].get_pose()
if "occupancy_grid" in self.modalities:
self.grid_resolution = self.config.get("grid_resolution", 128)
@@ -63,16 +61,12 @@ def get_local_occupancy_grid(self, scan):
scan_laser = unit_vector_laser * (scan * (laser_linear_range - min_laser_dist) + min_laser_dist)
- laser_translation = self.laser_position
- laser_rotation = quat2mat(
- [self.laser_orientation[3], self.laser_orientation[0], self.laser_orientation[1], self.laser_orientation[2]]
- )
+ laser_translation = self.laser_pose[:3]
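+ # get_pose() is assumed to return [x, y, z, qx, qy, qz, qw]; quat2mat expects [w, x, y, z]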
+ laser_rotation = quat2mat([self.laser_pose[6], self.laser_pose[3], self.laser_pose[4], self.laser_pose[5]])
scan_world = laser_rotation.dot(scan_laser.T).T + laser_translation
- base_translation = self.base_position
- base_rotation = quat2mat(
- [self.base_orientation[3], self.base_orientation[0], self.base_orientation[1], self.base_orientation[2]]
- )
+ base_translation = self.base_pose[:3]
+ base_rotation = quat2mat([self.base_pose[6], self.base_pose[3], self.base_pose[4], self.base_pose[5]])
scan_local = base_rotation.T.dot((scan_world - base_translation).T).T
scan_local = scan_local[:, :2]
scan_local = np.concatenate([np.array([[0, 0]]), scan_local, np.array([[0, 0]])], axis=0)
@@ -113,25 +107,23 @@ def get_obs(self, env):
:return: LiDAR sensor reading and local occupancy grid, normalized to [0.0, 1.0]
"""
laser_angular_half_range = self.laser_angular_range / 2.0
- if self.laser_link_name not in env.robots[0].links:
+ if self.laser_link_name not in env.robots[0].parts:
raise Exception(
"Trying to simulate LiDAR sensor, but laser_link_name cannot be found in the robot URDF file. Please add a link named laser_link_name at the intended laser pose. Feel free to check out assets/models/turtlebot/turtlebot.urdf and examples/configs/turtlebot_p2p_nav.yaml for examples."
)
- laser_position, laser_orientation = env.robots[0].links[self.laser_link_name].get_position_orientation()
+ laser_pose = env.robots[0].parts[self.laser_link_name].get_pose()
angle = np.arange(
-laser_angular_half_range / 180 * np.pi,
laser_angular_half_range / 180 * np.pi,
self.laser_angular_range / 180.0 * np.pi / self.n_horizontal_rays,
)
unit_vector_local = np.array([[np.cos(ang), np.sin(ang), 0.0] for ang in angle])
- transform_matrix = quat2mat(
- [laser_orientation[3], laser_orientation[0], laser_orientation[1], laser_orientation[2]]
- ) # [x, y, z, w]
+ transform_matrix = quat2mat([laser_pose[6], laser_pose[3], laser_pose[4], laser_pose[5]]) # [x, y, z, w]
unit_vector_world = transform_matrix.dot(unit_vector_local.T).T
- start_pose = np.tile(laser_position, (self.n_horizontal_rays, 1))
+ start_pose = np.tile(laser_pose[:3], (self.n_horizontal_rays, 1))
start_pose += unit_vector_world * self.min_laser_dist
- end_pose = laser_position + unit_vector_world * self.laser_linear_range
+ end_pose = laser_pose[:3] + unit_vector_world * self.laser_linear_range
results = p.rayTestBatch(start_pose, end_pose, 6) # numThreads = 6
# hit fraction = [0.0, 1.0] of self.laser_linear_range
diff --git a/igibson/sensors/vision_sensor.py b/igibson/sensors/vision_sensor.py
index 8d3e6cde2..c5667b235 100644
--- a/igibson/sensors/vision_sensor.py
+++ b/igibson/sensors/vision_sensor.py
@@ -135,14 +135,14 @@ def get_seg(self, raw_vision_obs):
"""
:return: semantic segmentation mask, between 0 and MAX_CLASS_COUNT
"""
- seg = np.round(raw_vision_obs["seg"][:, :, 0:1] * MAX_CLASS_COUNT).astype(np.int32)
+ seg = (raw_vision_obs["seg"][:, :, 0:1] * MAX_CLASS_COUNT).astype(np.int32)
return seg
def get_ins_seg(self, raw_vision_obs):
"""
:return: semantic segmentation mask, between 0 and MAX_INSTANCE_COUNT
"""
- seg = np.round(raw_vision_obs["ins_seg"][:, :, 0:1] * MAX_INSTANCE_COUNT).astype(np.int32)
+ seg = (raw_vision_obs["ins_seg"][:, :, 0:1] * MAX_INSTANCE_COUNT).astype(np.int32)
return seg
def get_obs(self, env):
diff --git a/igibson/simulator.py b/igibson/simulator.py
index 3348f29f9..63ab38fb3 100644
--- a/igibson/simulator.py
+++ b/igibson/simulator.py
@@ -1,40 +1,37 @@
+import ctypes
import logging
import os
import platform
+import time
+from time import sleep
import numpy as np
import pybullet as p
import igibson
from igibson.object_states.factory import get_states_by_dependency_order
-from igibson.objects.object_base import NonRobotObject
+from igibson.objects.articulated_object import ArticulatedObject, URDFObject
+from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer
+from igibson.objects.object_base import Object
from igibson.objects.particles import Particle, ParticleSystem
+from igibson.objects.stateful_object import StatefulObject
from igibson.objects.visual_marker import VisualMarker
-from igibson.render.mesh_renderer.materials import ProceduralMaterial, RandomizedMaterial
+from igibson.render.mesh_renderer.instances import Instance, InstanceGroup
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_tensor import MeshRendererG2G
-from igibson.render.viewer import Viewer, ViewerSimple
+from igibson.render.mesh_renderer.mesh_renderer_vr import MeshRendererVR, VrSettings
+from igibson.render.viewer import Viewer, ViewerSimple, ViewerVR
from igibson.robots.behavior_robot import BehaviorRobot
from igibson.robots.robot_base import BaseRobot
+from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.scenes.scene_base import Scene
from igibson.utils.assets_utils import get_ig_avg_category_specs
-from igibson.utils.constants import PYBULLET_BASE_LINK_INDEX, PyBulletSleepState, SimulatorMode
+from igibson.utils.constants import PyBulletSleepState, SemanticClass
from igibson.utils.mesh_util import quat2rotmat, xyz2mat, xyzw2wxyz
-
-
-def load_without_pybullet_vis(load_func):
- """
- Load without pybullet visualizer.
- """
-
- def wrapped_load_func(*args, **kwargs):
- p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, False)
- res = load_func(*args, **kwargs)
- p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, True)
- return res
-
- return wrapped_load_func
+from igibson.utils.semantics_utils import get_class_name_to_class_id
+from igibson.utils.utils import quatXYZWFromRotMat
+from igibson.utils.vr_utils import VR_CONTROLLERS, VR_DEVICES, VrData, calc_offset, calc_z_rot_from_right
class Simulator:
@@ -49,13 +46,14 @@ def __init__(
physics_timestep=1 / 120.0,
render_timestep=1 / 30.0,
solver_iterations=100,
- mode="gui_interactive",
+ mode="gui",
image_width=128,
image_height=128,
vertical_fov=90,
device_idx=0,
+ render_to_tensor=False,
rendering_settings=MeshRendererSettings(),
- use_pb_gui=False,
+ vr_settings=VrSettings(),
):
"""
:param gravity: gravity on z direction.
@@ -64,13 +62,15 @@ def __init__(
:param solver_iterations: number of solver iterations to feed into pybullet, can be reduced to increase speed.
pybullet default value is 50.
:param use_variable_step_num: whether to use a fixed (1) or variable physics step number
- :param mode: choose mode from headless, headless_tensor, gui_interactive, gui_non_interactive
+ :param mode: choose mode from gui, headless, iggui (only open iGibson UI), or pbgui (only open pybullet UI)
:param image_width: width of the camera image
:param image_height: height of the camera image
:param vertical_fov: vertical field of view of the camera image in degrees
:param device_idx: GPU device index to run rendering on
+ :param render_to_tensor: Render to GPU tensors
+ disable it when you want to run multiple physics steps but don't need to visualize each frame
:param rendering_settings: settings to use for mesh renderer
- :param use_pb_gui: concurrently display the interactive pybullet gui (for debugging)
+ :param vr_settings: settings to use for VR in simulator and MeshRendererVR
"""
# physics simulator
self.gravity = gravity
@@ -79,53 +79,101 @@ def __init__(
self.solver_iterations = solver_iterations
self.physics_timestep_num = self.render_timestep / self.physics_timestep
assert self.physics_timestep_num.is_integer(), "render_timestep must be a multiple of physics_timestep"
-
self.physics_timestep_num = int(self.physics_timestep_num)
- self.mode = SimulatorMode[mode.upper()]
+
+ self.mode = mode
+
+ self.scene = None
self.particle_systems = []
+
+ # TODO: remove this, currently used for testing only
self.objects = []
+ plt = platform.system()
+ if plt == "Darwin" and self.mode == "gui":
+ self.mode = "iggui" # for mac os disable pybullet rendering
+ logging.warn(
+ "Rendering both iggui and pbgui is not supported on mac, choose either pbgui or "
+ "iggui. Default to iggui."
+ )
+ if plt == "Windows" and self.mode in ["vr"]:
+ # By default, windows does not provide ms level timing accuracy
+ winmm = ctypes.WinDLL("winmm")
+ winmm.timeBeginPeriod(1)
+
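+ # Map the requested mode onto renderer/viewer flags: "gui" enables both the iGibson viewer and the pybullet GUI, "iggui"/"pbgui" enable one each, "vr" and "simple" select their dedicated viewers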
+ self.use_pb_renderer = False
+ self.use_ig_renderer = False
+ self.use_vr_renderer = False
+ self.use_simple_viewer = False
+
+ if self.mode in ["gui", "iggui"]:
+ self.use_ig_renderer = True
+
+ if self.mode in ["gui", "pbgui"]:
+ self.use_pb_renderer = True
+
+ if self.mode in ["vr"]:
+ self.use_vr_renderer = True
+ rendering_settings.blend_highlight = True
+
+ if self.mode in ["simple"]:
+ self.use_simple_viewer = True
+
+ # Starting position for the VR (default set to None if no starting position is specified by the user)
+ self.vr_start_pos = None
+ self.eye_tracking_data = None
+ self.max_haptic_duration = 4000
self.image_width = image_width
self.image_height = image_height
self.vertical_fov = vertical_fov
self.device_idx = device_idx
- self.rendering_settings = rendering_settings
- self.use_pb_gui = use_pb_gui
+ self.render_to_tensor = render_to_tensor
- plt = platform.system()
- if plt == "Darwin" and self.mode == SimulatorMode.GUI_INTERACTIVE and use_pb_gui:
- self.use_pb_gui = False # for mac os disable pybullet rendering
- logging.warning(
- "Simulator mode gui_interactive is not supported when `use_pb_gui` is true on macOS. Default to use_pb_gui = False."
- )
- if plt != "Linux" and self.mode == SimulatorMode.HEADLESS_TENSOR:
- self.mode = SimulatorMode.HEADLESS
- logging.warning("Simulator mode headless_tensor is only supported on Linux. Default to headless mode.")
-
- self.frame_count = 0
- self.body_links_awake = 0
+ self.optimized_renderer = rendering_settings.optimized
+ self.rendering_settings = rendering_settings
self.viewer = None
- self.renderer = None
-
- self.initialize_renderer()
- self.initialize_physics_engine()
- self.initialize_viewers()
+ self.vr_settings = vr_settings
+ self.vr_overlay_initialized = False
+ # We must be using the Simulator's vr mode and have use_vr set to true in the settings to access the VR context
+ self.can_access_vr_context = self.use_vr_renderer and self.vr_settings.use_vr
+ # Duration of a vsync frame - assumes 90Hz refresh rate
+ self.vsync_frame_dur = 11.11e-3
+ # Get expected number of vsync frames per iGibson frame
+ # Note: currently assumes a 90Hz VR system
+ self.vsync_frame_num = int(round(self.render_timestep / self.vsync_frame_dur))
+ # Total amount of time we want non-blocking actions to take each frame
+ # Leave a small amount of time before the last vsync, just in case we overrun
+ self.non_block_frame_time = (self.vsync_frame_num - 1) * self.vsync_frame_dur + (
+ 5e-3 if self.vr_settings.curr_device == "OCULUS" else 10e-3
+ )
+ # Timing variables for functions called outside of step() that also take up frame time
+ self.frame_end_time = None
+ self.main_vr_robot = None
+
+ # Variables for data saving and replay in VR
+ self.last_physics_timestep = -1
+ self.last_render_timestep = -1
+ self.last_physics_step_num = -1
+ self.last_frame_dur = -1
+ self.frame_count = 0
- self.robots = []
+ self.load()
+ self.class_name_to_class_id = get_class_name_to_class_id()
+ self.body_links_awake = 0
# First sync always sync all objects (regardless of their sleeping states)
self.first_sync = True
-
# Set of categories that can be grasped by assisted grasping
- self.object_state_types = get_states_by_dependency_order()
-
- self.assist_grasp_category_allow_list = self.gen_assisted_grasping_categories()
+ self.assist_grasp_category_allow_list = set()
+ self.gen_assisted_grasping_categories()
self.assist_grasp_mass_thresh = 10.0
+ self.object_state_types = get_states_by_dependency_order()
+
def set_timestep(self, physics_timestep, render_timestep):
"""
- Set physics timestep and render (action) timestep.
+ Set physics timestep and render (action) timestep
:param physics_timestep: physics timestep for pybullet
:param render_timestep: rendering timestep for renderer
@@ -134,36 +182,39 @@ def set_timestep(self, physics_timestep, render_timestep):
self.render_timestep = render_timestep
p.setTimeStep(self.physics_timestep)
- def disconnect(self, release_renderer=True):
+ def set_render_timestep(self, render_timestep):
"""
- Clean up the simulator.
+ :param render_timestep: render timestep to set in the Simulator
+ """
+ self.render_timestep = render_timestep
- :param release_renderer: whether to release the MeshRenderer
+ def add_viewer(self):
"""
- if p.getConnectionInfo(self.cid)["isConnected"]:
- # print("******************PyBullet Logging Information:")
- p.resetSimulation(physicsClientId=self.cid)
- p.disconnect(self.cid)
- # print("PyBullet Logging Information******************")
- if release_renderer:
- self.renderer.release()
+ Attach a debugging viewer to the renderer.
+ This will make the step much slower so should be avoided when training agents
+ """
+ if self.use_vr_renderer:
+ self.viewer = ViewerVR(
+ self.vr_settings.use_companion_window, frame_save_path=self.vr_settings.frame_save_path
+ )
+ elif self.use_simple_viewer:
+ self.viewer = ViewerSimple()
+ else:
+ self.viewer = Viewer(simulator=self, renderer=self.renderer)
+ self.viewer.renderer = self.renderer
def reload(self):
"""
Destroy the MeshRenderer and physics simulator and start again.
"""
self.disconnect()
+ self.load()
- self.initialize_renderer()
- self.initialize_physics_engine()
- self.initialize_viewers()
-
- def initialize_renderer(self):
+ def load(self):
"""
- Initialize the MeshRenderer.
+ Set up MeshRenderer and physics simulation client. Initialize the list of objects.
"""
- self.visual_objects = {}
- if self.mode == SimulatorMode.HEADLESS_TENSOR:
+ if self.render_to_tensor:
self.renderer = MeshRendererG2G(
width=self.image_width,
height=self.image_height,
@@ -172,7 +223,11 @@ def initialize_renderer(self):
rendering_settings=self.rendering_settings,
simulator=self,
)
- elif self.mode in [SimulatorMode.GUI_INTERACTIVE, SimulatorMode.GUI_NON_INTERACTIVE, SimulatorMode.HEADLESS]:
+ elif self.use_vr_renderer:
+ self.renderer = MeshRendererVR(
+ rendering_settings=self.rendering_settings, vr_settings=self.vr_settings, simulator=self
+ )
+ else:
self.renderer = MeshRenderer(
width=self.image_width,
height=self.image_height,
@@ -181,254 +236,801 @@ def initialize_renderer(self):
rendering_settings=self.rendering_settings,
simulator=self,
)
- else:
- raise Exception(
- "The available render modes are headless, headless_tensor, gui_interactive, and gui_non_interactive."
- )
- def initialize_physics_engine(self):
- """
- Initialize the physics engine (pybullet).
- """
- if self.use_pb_gui:
+ # print("******************PyBullet Logging Information:")
+ if self.use_pb_renderer:
self.cid = p.connect(p.GUI)
else:
self.cid = p.connect(p.DIRECT)
- # Needed for deterministic action replay
- # TODO(mjlbach) consider making optional and benchmark
+ # Simulation reset is needed for deterministic action replay
+ #if self.vr_settings.reset_sim:
p.resetSimulation()
p.setPhysicsEngineParameter(deterministicOverlappingPairs=1)
p.setPhysicsEngineParameter(numSolverIterations=self.solver_iterations)
p.setTimeStep(self.physics_timestep)
p.setGravity(0, 0, -self.gravity)
p.setPhysicsEngineParameter(enableFileCaching=0)
+ self.visual_objects = {}
+ self.robots = []
+ self.scene = None
+ if (self.use_ig_renderer or self.use_vr_renderer or self.use_simple_viewer) and not self.render_to_tensor:
+ self.add_viewer()
- def initialize_viewers(self):
- if self.mode == SimulatorMode.GUI_NON_INTERACTIVE:
- self.viewer = ViewerSimple(renderer=self.renderer)
- elif self.mode == SimulatorMode.GUI_INTERACTIVE:
- self.viewer = Viewer(simulator=self, renderer=self.renderer)
+ def load_without_pybullet_vis(load_func):
+ """
+ Load without pybullet visualizer
+ """
+
+ def wrapped_load_func(*args, **kwargs):
+ p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, False)
+ res = load_func(*args, **kwargs)
+ p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, True)
+ return res
+
+ return wrapped_load_func
@load_without_pybullet_vis
- def import_scene(self, scene):
+ def import_scene(
+ self,
+ scene,
+ texture_scale=1.0,
+ load_texture=True,
+ render_floor_plane=False,
+ class_id=SemanticClass.SCENE_OBJS,
+ ):
"""
Import a scene into the simulator. A scene could be a synthetic one or a realistic Gibson Environment.
- :param scene: a scene object to load
+ :param scene: Scene object
+ :param texture_scale: Option to scale down the texture for rendering
+ :param load_texture: If you don't need rgb output, texture loading could be skipped to make rendering faster
+ :param render_floor_plane: Whether to render the additionally added floor plane
+ :param class_id: Class id for rendering semantic segmentation
+ :return: pybullet body ids from scene.load function
"""
- assert isinstance(scene, Scene), "import_scene can only be called with Scene"
- scene.load(self)
+ assert isinstance(scene, Scene) and not isinstance(
+ scene, InteractiveIndoorScene
+ ), "import_scene can only be called with Scene that is not InteractiveIndoorScene"
+ # Load the scene. Returns a list of pybullet ids of the objects loaded that we can use to
+ # load them in the renderer
+ new_object_pb_ids = scene.load()
+ self.objects += new_object_pb_ids
+
+ # Load the objects in the renderer
+ for new_object_pb_id in new_object_pb_ids:
+ self.load_object_in_renderer(
+ new_object_pb_id,
+ class_id=class_id,
+ texture_scale=texture_scale,
+ load_texture=load_texture,
+ render_floor_plane=render_floor_plane,
+ use_pbr=False,
+ use_pbr_mapping=False,
+ )
+
+ # TODO: add instance referencing for iG v1 scenes
+
self.scene = scene
+ # Load the states of all the objects in the scene.
+ for obj in scene.get_objects():
+ if isinstance(obj, StatefulObject):
+ if isinstance(obj, ObjectMultiplexer):
+ for sub_obj in obj._multiplexed_objects:
+ if isinstance(sub_obj, ObjectGrouper):
+ for group_sub_obj in sub_obj.objects:
+ for state in group_sub_obj.states.values():
+ state.initialize(self)
+ else:
+ for state in sub_obj.states.values():
+ state.initialize(self)
+ else:
+ for state in obj.states.values():
+ state.initialize(self)
+
+ return new_object_pb_ids
+
@load_without_pybullet_vis
- def import_particle_system(self, particle_system):
+ def import_ig_scene(self, scene):
"""
- Import a particle system into the simulator. Called by objects owning a particle-system, via reference to the Simulator instance.
+ Import scene from iGSDF class
- :param particle_system: a ParticleSystem object to load
+ :param scene: iGSDFScene instance
+ :return: pybullet body ids from scene.load function
"""
-
assert isinstance(
- particle_system, ParticleSystem
- ), "import_particle_system can only be called with ParticleSystem"
+ scene, InteractiveIndoorScene
+ ), "import_ig_scene can only be called with InteractiveIndoorScene"
+ if not self.use_pb_renderer:
+ scene.set_ignore_visual_shape(True)
+ # skip loading visual shape if not using pybullet visualizer
+
+ new_object_ids = scene.load()
+ self.objects += new_object_ids
+
+ for body_id, visual_mesh_to_material, link_name_to_vm in zip(
+ new_object_ids, scene.visual_mesh_to_material, scene.link_name_to_vm
+ ):
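+ # In IG-source scenes, walls, floors and ceilings are rendered without PBR, and ceilings do not cast shadows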
+ use_pbr = True
+ use_pbr_mapping = True
+ shadow_caster = True
+ physical_object = scene.objects_by_id[body_id]
+ if scene.scene_source == "IG":
+ if physical_object.category in ["walls", "floors", "ceilings"]:
+ use_pbr = False
+ use_pbr_mapping = False
+ if physical_object.category == "ceilings":
+ shadow_caster = False
+ class_id = self.class_name_to_class_id.get(physical_object.category, SemanticClass.SCENE_OBJS)
+ self.load_articulated_object_in_renderer(
+ body_id,
+ class_id=class_id,
+ visual_mesh_to_material=visual_mesh_to_material,
+ link_name_to_vm=link_name_to_vm,
+ use_pbr=use_pbr,
+ use_pbr_mapping=use_pbr_mapping,
+ shadow_caster=shadow_caster,
+ physical_object=physical_object,
+ )
+
+ self.scene = scene
+
+ # Load the states of all the objects in the scene.
+ for obj in scene.get_objects():
+ if isinstance(obj, StatefulObject):
+ if isinstance(obj, ObjectMultiplexer):
+ for sub_obj in obj._multiplexed_objects:
+ if isinstance(sub_obj, ObjectGrouper):
+ for group_sub_obj in sub_obj.objects:
+ for state in group_sub_obj.states.values():
+ state.initialize(self)
+ else:
+ for state in sub_obj.states.values():
+ state.initialize(self)
+ else:
+ for state in obj.states.values():
+ state.initialize(self)
- self.particle_systems.append(particle_system)
- particle_system.initialize(self)
+ return new_object_ids
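+
+ # Usage sketch (illustrative only, not part of this patch): interactive scenes
+ # go through import_ig_scene instead, e.g.
+ #
+ #     scene = InteractiveIndoorScene("Rs_int")
+ #     body_ids = s.import_ig_scene(scene)
+ #
+ # As handled above, walls/floors/ceilings are loaded without PBR and ceilings
+ # do not cast shadows when the scene source is "IG".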
@load_without_pybullet_vis
- def import_object(self, obj):
+ def import_particle_system(self, obj):
+ """
+ Import a particle system into the simulator.
+ :param obj: a ParticleSystem instance to load
"""
- Import a non-robot object into the simulator.
- :param obj: a non-robot object to load
+ assert isinstance(obj, ParticleSystem), "import_particle_system can only be called with ParticleSystem"
+
+ self.particle_systems.append(obj)
+ obj.initialize(self)
+
+ @load_without_pybullet_vis
+ def import_object(
+ self, obj, class_id=SemanticClass.USER_ADDED_OBJS, use_pbr=True, use_pbr_mapping=True, shadow_caster=True
+ ):
+ """
+ Import an object into the simulator
+
+ :param obj: Object to load
+ :param class_id: Class id for rendering semantic segmentation
+ :param use_pbr: Whether to use physically based rendering (PBR)
+ :param use_pbr_mapping: Whether to use PBR mapping
+ :param shadow_caster: Whether the object casts shadows
"""
- assert isinstance(obj, NonRobotObject), "import_object can only be called with NonRobotObject"
+ assert isinstance(obj, Object), "import_object can only be called with Object"
if isinstance(obj, VisualMarker) or isinstance(obj, Particle):
# Marker objects can be imported without a scene.
- obj.load(self)
+ new_object_pb_id_or_ids = obj.load()
else:
# Non-marker objects require a Scene to be imported.
+ assert self.scene is not None, "A scene must be imported before additional objects can be imported."
# Load the object in pybullet. Returns a pybullet id that we can use to load it in the renderer
- self.scene.add_object(obj, self, _is_call_from_simulator=True)
+ new_object_pb_id_or_ids = self.scene.add_object(obj, _is_call_from_simulator=True)
+
+ # If no new bodies are immediately imported into pybullet, we have no rendering steps.
+ if new_object_pb_id_or_ids is None:
+ return None
+
+ if isinstance(new_object_pb_id_or_ids, list):
+ new_object_pb_ids = new_object_pb_id_or_ids
+ else:
+ new_object_pb_ids = [new_object_pb_id_or_ids]
+ self.objects += new_object_pb_ids
+
+ for i, new_object_pb_id in enumerate(new_object_pb_ids):
+ if isinstance(obj, ArticulatedObject) or isinstance(obj, URDFObject):
+ if isinstance(obj, ArticulatedObject):
+ visual_mesh_to_material = None
+ else:
+ visual_mesh_to_material = obj.visual_mesh_to_material[i]
+ link_name_to_vm = obj.link_name_to_vm[i]
+ self.load_articulated_object_in_renderer(
+ new_object_pb_id,
+ class_id=class_id,
+ use_pbr=use_pbr,
+ use_pbr_mapping=use_pbr_mapping,
+ visual_mesh_to_material=visual_mesh_to_material,
+ link_name_to_vm=link_name_to_vm,
+ shadow_caster=shadow_caster,
+ physical_object=obj,
+ )
+ else:
+ softbody = obj.__class__.__name__ == "SoftObject"
+ self.load_object_in_renderer(
+ new_object_pb_id,
+ class_id=class_id,
+ softbody=softbody,
+ use_pbr=use_pbr,
+ use_pbr_mapping=use_pbr_mapping,
+ shadow_caster=shadow_caster,
+ physical_object=obj,
+ )
+
+ # Finally, initialize the object's states
+ if isinstance(obj, StatefulObject):
+ if isinstance(obj, ObjectMultiplexer):
+ for sub_obj in obj._multiplexed_objects:
+ if isinstance(sub_obj, ObjectGrouper):
+ for group_sub_obj in sub_obj.objects:
+ for state in group_sub_obj.states.values():
+ state.initialize(self)
+ else:
+ for state in sub_obj.states.values():
+ state.initialize(self)
+ else:
+ for state in obj.states.values():
+ state.initialize(self)
+
+ return new_object_pb_id_or_ids
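+
+ # Usage sketch (illustrative only, not part of this patch): once a scene has
+ # been imported, additional objects can be added (constructor args assumed):
+ #
+ #     obj = ArticulatedObject("/path/to/model.urdf")
+ #     body_id = s.import_object(obj, class_id=SemanticClass.USER_ADDED_OBJS)
+ #
+ # URDFObject/ArticulatedObject instances are routed to
+ # load_articulated_object_in_renderer; everything else goes through
+ # load_object_in_renderer.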
@load_without_pybullet_vis
- def import_robot(self, robot):
+ def load_visual_sphere(self, radius, color=[1, 0, 0]):
"""
- Import a robot into the simulator.
-
- :param robot: a robot object to load
+ Load a visual-only (not controlled by pybullet) sphere into the renderer.
+ Such a sphere can be moved around without affecting PyBullet determinism.
+ :param radius: the radius of the visual sphere in meters
+ :param color: RGB color of sphere (from 0 to 1 on each axis)
"""
- # TODO: Remove this function in favor of unifying with import_object.
- assert isinstance(robot, (BaseRobot, BehaviorRobot)), "import_robot can only be called with Robots"
+ sphere_file = os.path.join(igibson.assets_path, "models/mjcf_primitives/sphere8.obj")
+ self.renderer.load_object(
+ sphere_file,
+ transform_orn=[0, 0, 0, 1],
+ transform_pos=[0, 0, 0],
+ input_kd=[1, 0, 0],
+ scale=[radius, radius, radius],
+ )
+ visual_object = len(self.renderer.get_visual_objects()) - 1
+ self.renderer.add_instance(
+ visual_object,
+ pybullet_uuid=0, # this can be ignored
+ class_id=1, # this can be ignored
+ dynamic=False,
+ softbody=False,
+ use_pbr=False,
+ use_pbr_mapping=False,
+ shadow_caster=False,
+ )
+ # Return instance so we can control it
+ return self.renderer.instances[-1]
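+
+ # Usage sketch (illustrative only, not part of this patch):
+ #
+ #     marker = s.load_visual_sphere(radius=0.05, color=[0, 1, 0])
+ #
+ # The returned renderer instance is not tracked by pybullet, so it can be
+ # repositioned freely without affecting physics determinism.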
- # TODO: remove this if statement after BehaviorRobot refactoring
- if isinstance(robot, BaseRobot):
- assert (
- robot.control_freq is None
- ), "control_freq should NOT be specified in robot config. Currently this value is automatically inferred from simulator.render_timestep!"
- control_freq = 1.0 / self.render_timestep
- robot.control_freq = control_freq
+ @load_without_pybullet_vis
+ def load_object_in_renderer(
+ self,
+ object_pb_id,
+ class_id=None,
+ softbody=False,
+ texture_scale=1.0,
+ load_texture=True,
+ render_floor_plane=False,
+ use_pbr=True,
+ use_pbr_mapping=True,
+ shadow_caster=True,
+ physical_object=None,
+ ):
+ """
+ Load the object into the renderer.
+ :param object_pb_id: pybullet body id
+ :param class_id: Class id for rendering semantic segmentation
+ :param softbody: Whether the object is a soft body
+ :param texture_scale: Texture scale
+ :param load_texture: Whether to load textures; texture loading can be skipped to speed up rendering when RGB output is not needed
+ :param render_floor_plane: Whether to render the additional floor plane added to the scene
+ :param use_pbr: Whether to use physically based rendering (PBR)
+ :param use_pbr_mapping: Whether to use PBR mapping
+ :param shadow_caster: Whether the object casts shadows
+ :param physical_object: Reference to the corresponding Object instance
+ """
- self.scene.add_object(robot, self, _is_call_from_simulator=True)
- self.robots.append(robot)
+ # Load the object into the renderer using its visual shapes, expressed in the
+ # base_link frame (not the CoM frame).
+ # Do not load URDFObject or ArticulatedObject with this function.
+ if physical_object is not None and (
+ isinstance(physical_object, ArticulatedObject) or isinstance(physical_object, URDFObject)
+ ):
+ raise ValueError("loading articulated object with load_object_in_renderer function")
+
+ for shape in p.getVisualShapeData(object_pb_id):
+ id, link_id, type, dimensions, filename, rel_pos, rel_orn, color = shape[:8]
+ dynamics_info = p.getDynamicsInfo(id, link_id)
+ inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
+ rel_pos, rel_orn = p.multiplyTransforms(*p.invertTransform(inertial_pos, inertial_orn), rel_pos, rel_orn)
+ # visual mesh frames are transformed from the urdfLinkFrame origin to the comLinkFrame origin
+ visual_object = None
+ if type == p.GEOM_MESH:
+ filename = filename.decode("utf-8")
+ if (filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn)) not in self.visual_objects.keys():
+ self.renderer.load_object(
+ filename,
+ transform_orn=rel_orn,
+ transform_pos=rel_pos,
+ input_kd=color[:3],
+ scale=np.array(dimensions),
+ texture_scale=texture_scale,
+ load_texture=load_texture,
+ )
+ self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))] = (
+ len(self.renderer.visual_objects) - 1
+ )
+ visual_object = self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))]
+ elif type == p.GEOM_SPHERE:
+ filename = os.path.join(igibson.assets_path, "models/mjcf_primitives/sphere8.obj")
+ self.renderer.load_object(
+ filename,
+ transform_orn=rel_orn,
+ transform_pos=rel_pos,
+ input_kd=color[:3],
+ scale=[dimensions[0] / 0.5, dimensions[0] / 0.5, dimensions[0] / 0.5],
+ )
+ visual_object = len(self.renderer.get_visual_objects()) - 1
+ elif type == p.GEOM_CAPSULE or type == p.GEOM_CYLINDER:
+ filename = os.path.join(igibson.assets_path, "models/mjcf_primitives/cube.obj")
+ self.renderer.load_object(
+ filename,
+ transform_orn=rel_orn,
+ transform_pos=rel_pos,
+ input_kd=color[:3],
+ scale=[dimensions[1] / 0.5, dimensions[1] / 0.5, dimensions[0]],
+ )
+ visual_object = len(self.renderer.get_visual_objects()) - 1
+ elif type == p.GEOM_BOX:
+ filename = os.path.join(igibson.assets_path, "models/mjcf_primitives/cube.obj")
+ self.renderer.load_object(
+ filename,
+ transform_orn=rel_orn,
+ transform_pos=rel_pos,
+ input_kd=color[:3],
+ scale=np.array(dimensions),
+ )
+ visual_object = len(self.renderer.visual_objects) - 1
+ elif type == p.GEOM_PLANE:
+ # By default, we add an additional floor surface to "smooth out" that of the original mesh.
+ # Normally you don't need to render this additionally added floor surface.
+ # However, if you do want to render it for some reason, you can set render_floor_plane to be True.
+ if render_floor_plane:
+ filename = os.path.join(igibson.assets_path, "models/mjcf_primitives/cube.obj")
+ self.renderer.load_object(
+ filename,
+ transform_orn=rel_orn,
+ transform_pos=rel_pos,
+ input_kd=color[:3],
+ scale=[100, 100, 0.01],
+ )
+ visual_object = len(self.renderer.visual_objects) - 1
+ if visual_object is not None:
+ self.renderer.add_instance(
+ visual_object,
+ pybullet_uuid=object_pb_id,
+ class_id=class_id,
+ dynamic=True,
+ softbody=softbody,
+ use_pbr=use_pbr,
+ use_pbr_mapping=use_pbr_mapping,
+ shadow_caster=shadow_caster,
+ )
+ if physical_object is not None:
+ physical_object.renderer_instances.append(self.renderer.instances[-1])
@load_without_pybullet_vis
- def load_object_in_renderer(
+ def load_articulated_object_in_renderer(
self,
- obj,
- body_id,
- class_id,
- link_name_to_vm=None,
+ object_pb_id,
+ physical_object,
+ link_name_to_vm,
+ class_id=None,
visual_mesh_to_material=None,
use_pbr=True,
use_pbr_mapping=True,
shadow_caster=True,
- softbody=False,
- texture_scale=1.0,
):
"""
- Load an object into the MeshRenderer. The object should be already loaded into pybullet.
-
- :param obj: an object to load
- :param body_id: pybullet body id
- :param class_id: class id to render semantics
- :param link_name_to_vm: a link-name-to-visual-mesh mapping
- :param visual_mesh_to_material: a visual-mesh-to-material mapping
- :param use_pbr: whether to use PBR
- :param use_pbr_mapping: whether to use PBR mapping
- :param shadow_caster: whether to cast shadow
- :param softbody: whether the instance group is for a soft body
- :param texture_scale: texture scale for the object, downsample to save memory
- """
- # First, grab all the visual shapes.
- if link_name_to_vm:
- # If a manual link-name-to-visual-mesh mapping is given, use that to generate list of shapes.
- shapes = []
- for link_id in list(range(p.getNumJoints(body_id))) + [-1]:
- if link_id == PYBULLET_BASE_LINK_INDEX:
- link_name = p.getBodyInfo(body_id)[0].decode("utf-8")
- else:
- link_name = p.getJointInfo(body_id, link_id)[12].decode("utf-8")
+ Load the articulated object into the renderer.
+
+ :param object_pb_id: pybullet body id
+ :param physical_object: Reference to the corresponding Object instance
+ :param link_name_to_vm: mapping from link name to a list of visual mesh file paths
+ :param class_id: Class id for rendering semantic segmentation
+ :param visual_mesh_to_material: mapping from visual mesh to randomizable materials
+ :param use_pbr: Whether to use physically based rendering (PBR)
+ :param use_pbr_mapping: Whether to use PBR mapping
+ :param shadow_caster: Whether the object casts shadows
+ """
+ # Load the object into the renderer using the visual shapes from the
+ # physical_object class, expressed in the CoM frame.
+ # Only load URDFObject or ArticulatedObject with this function.
+ if not (
+ isinstance(physical_object, ArticulatedObject)
+ or isinstance(physical_object, URDFObject)
+ or isinstance(physical_object, ObjectMultiplexer)
+ ):
+ raise ValueError("loading non-articulated object with load_articulated_object_in_renderer function")
+
+ visual_objects = []
+ link_ids = []
+ poses_rot = []
+ poses_trans = []
+ color = [0, 0, 0]
+ for link_id in list(range(p.getNumJoints(object_pb_id))) + [-1]:
+ if link_id == -1:
+ link_name = p.getBodyInfo(object_pb_id)[0].decode("utf-8")
+ else:
+ link_name = p.getJointInfo(object_pb_id, link_id)[12].decode("utf-8")
- collision_shapes = p.getCollisionShapeData(body_id, link_id)
- collision_shapes = [item for item in collision_shapes if item[2] == p.GEOM_MESH]
- # a link can have multiple collision meshes due to boxification,
- # and we want to query the original collision mesh for information
+ collision_shapes = p.getCollisionShapeData(object_pb_id, link_id)
+ collision_shapes = [item for item in collision_shapes if item[2] == p.GEOM_MESH]
+ # a link can have multiple collision meshes due to boxification,
+ # and we want to query the original collision mesh for information
- if len(collision_shapes) == 0:
- continue
- else:
- _, _, _, dimensions, filename, rel_pos, rel_orn = collision_shapes[0]
+ if len(collision_shapes) == 0:
+ continue
+ else:
+ _, _, type, dimensions, filename, rel_pos, rel_orn = collision_shapes[0]
- filenames = link_name_to_vm[link_name]
- for filename in filenames:
- overwrite_material = None
- if visual_mesh_to_material is not None and filename in visual_mesh_to_material:
- overwrite_material = visual_mesh_to_material[filename]
+ filenames = link_name_to_vm[link_name]
+ for filename in filenames:
+ overwrite_material = None
+ if visual_mesh_to_material is not None and filename in visual_mesh_to_material:
+ overwrite_material = visual_mesh_to_material[filename]
- shapes.append(
- (link_id, p.GEOM_MESH, dimensions, filename, rel_pos, rel_orn, [0, 0, 0], overwrite_material)
+ if (
+ filename,
+ tuple(dimensions),
+ tuple(rel_pos),
+ tuple(rel_orn),
+ ) not in self.visual_objects.keys() or overwrite_material is not None:
+ # if the object has an overwrite material, always create a
+ # new visual object even if the same visual shape exists
+ self.renderer.load_object(
+ filename,
+ transform_orn=rel_orn,
+ transform_pos=rel_pos,
+ input_kd=color[:3],
+ scale=np.array(dimensions),
+ overwrite_material=overwrite_material,
)
- else:
- # Pull the visual shapes from pybullet
- shapes = []
- for shape in p.getVisualShapeData(body_id):
- link_id, type, dimensions, filename, rel_pos, rel_orn, color = shape[1:8]
+ self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))] = (
+ len(self.renderer.visual_objects) - 1
+ )
+ visual_objects.append(
+ self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))]
+ )
+ link_ids.append(link_id)
+
+ if link_id == -1:
+ pos, orn = p.getBasePositionAndOrientation(object_pb_id)
+ else:
+ pos, orn = p.getLinkState(object_pb_id, link_id)[:2]
+ poses_rot.append(np.ascontiguousarray(quat2rotmat(xyzw2wxyz(orn))))
+ poses_trans.append(np.ascontiguousarray(xyz2mat(pos)))
+
+ self.renderer.add_instance_group(
+ object_ids=visual_objects,
+ link_ids=link_ids,
+ pybullet_uuid=object_pb_id,
+ class_id=class_id,
+ poses_trans=poses_trans,
+ poses_rot=poses_rot,
+ dynamic=True,
+ robot=None,
+ use_pbr=use_pbr,
+ use_pbr_mapping=use_pbr_mapping,
+ shadow_caster=shadow_caster,
+ )
- if filename:
- filename = filename.decode("utf-8")
+ if physical_object is not None:
+ physical_object.renderer_instances.append(self.renderer.instances[-1])
- # visual meshes frame are transformed from the urdfLinkFrame as origin to comLinkFrame as origin
- dynamics_info = p.getDynamicsInfo(body_id, link_id)
+ def import_non_colliding_objects(self, objects, existing_objects=[], min_distance=0.5):
+ """
+ Loads objects into the scene such that they don't collide with existing objects.
+
+ :param objects: A dictionary with objects, from a scene loaded with a particular URDF
+ :param existing_objects: A list of objects that needs to be kept min_distance away when loading the new objects
+ :param min_distance: A minimum distance to require for objects to load
+ """
+ state_id = p.saveState()
+ objects_to_add = []
+ for obj_name in objects:
+ obj = objects[obj_name]
+
+ # Do not allow duplicate object categories
+ if obj.category in self.scene.objects_by_category:
+ continue
+
+ add = True
+ body_ids = []
+
+ # Filter based on the minimum distance to any existing object
+ for idx in range(len(obj.urdf_paths)):
+ body_id = p.loadURDF(obj.urdf_paths[idx])
+ body_ids.append(body_id)
+ transformation = obj.poses[idx]
+ pos = transformation[0:3, 3]
+ orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3]))
+ dynamics_info = p.getDynamicsInfo(body_id, -1)
inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
- rel_pos, rel_orn = p.multiplyTransforms(
- *p.invertTransform(inertial_pos, inertial_orn), rel_pos, rel_orn
- )
- shapes.append((link_id, type, dimensions, filename, rel_pos, rel_orn, color, None))
+ pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)
+ pos = list(pos)
+ min_distance_to_existing_object = None
+ for existing_object in existing_objects:
+ # If a sliced obj is an existing_object, get_position will not work
+ if isinstance(existing_object, ObjectMultiplexer) and isinstance(
+ existing_object.current_selection(), ObjectGrouper
+ ):
+ obj_pos = np.array([obj.get_position() for obj in existing_object.objects]).mean(axis=0)
+ else:
+ obj_pos = existing_object.get_position()
+ distance = np.linalg.norm(np.array(pos) - np.array(obj_pos))
+ if min_distance_to_existing_object is None or min_distance_to_existing_object > distance:
+ min_distance_to_existing_object = distance
+
+ if min_distance_to_existing_object is not None and min_distance_to_existing_object < min_distance:
+ add = False
+ break
+
+ pos[2] += 0.01  # slightly above so it does not touch furniture
+ p.resetBasePositionAndOrientation(body_id, pos, orn)
+
+ # Filter based on collisions with any existing object
+ if add:
+ p.stepSimulation()
+
+ for body_id in body_ids:
+ in_collision = len(p.getContactPoints(body_id)) > 0
+ if in_collision:
+ add = False
+ break
+
+ if add:
+ objects_to_add.append(obj)
+
+ for body_id in body_ids:
+ p.removeBody(body_id)
+
+ p.restoreState(state_id)
+
+ p.removeState(state_id)
+
+ for obj in objects_to_add:
+ self.import_object(obj)
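+
+ # Usage sketch (illustrative only, not part of this patch): `objects` maps
+ # object names to candidate objects (e.g. parsed from another scene URDF),
+ # and existing objects in the current scene are kept clear of them:
+ #
+ #     s.import_non_colliding_objects(
+ #         objects=candidate_objects,
+ #         existing_objects=s.scene.get_objects(),
+ #         min_distance=0.5,
+ #     )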
- # Now that we have the visual shapes, let's add them to the renderer.
+ @load_without_pybullet_vis
+ def import_robot(self, robot, class_id=SemanticClass.ROBOTS):
+ """
+ Import a robot into the simulator
+
+ :param robot: Robot
+ :param class_id: Class id for rendering semantic segmentation
+ :return: pybullet id
+ """
+ assert isinstance(robot, BaseRobot), "import_robot can only be called with BaseRobot"
+ ids = robot.load()
visual_objects = []
link_ids = []
poses_rot = []
poses_trans = []
- for shape in shapes:
- link_id, type, dimensions, filename, rel_pos, rel_orn, color, overwrite_material = shape
+ self.robots.append(robot)
- # Specify a filename if our object is not a mesh
- if type == p.GEOM_SPHERE:
+ for shape in p.getVisualShapeData(ids[0]):
+ id, link_id, type, dimensions, filename, rel_pos, rel_orn, color = shape[:8]
+ dynamics_info = p.getDynamicsInfo(id, link_id)
+ inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
+ rel_pos, rel_orn = p.multiplyTransforms(*p.invertTransform(inertial_pos, inertial_orn), rel_pos, rel_orn)
+ # visual mesh frames are transformed from the urdfLinkFrame origin to the comLinkFrame origin
+
+ if type == p.GEOM_MESH:
+ filename = filename.decode("utf-8")
+ if (filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn)) not in self.visual_objects.keys():
+ self.renderer.load_object(
+ filename,
+ transform_orn=rel_orn,
+ transform_pos=rel_pos,
+ input_kd=color[:3],
+ scale=np.array(dimensions),
+ )
+ self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))] = (
+ len(self.renderer.visual_objects) - 1
+ )
+ visual_objects.append(
+ self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))]
+ )
+ link_ids.append(link_id)
+ elif type == p.GEOM_SPHERE:
filename = os.path.join(igibson.assets_path, "models/mjcf_primitives/sphere8.obj")
- dimensions = [dimensions[0] / 0.5, dimensions[0] / 0.5, dimensions[0] / 0.5]
+ self.renderer.load_object(
+ filename,
+ transform_orn=rel_orn,
+ transform_pos=rel_pos,
+ input_kd=color[:3],
+ scale=[dimensions[0] / 0.5, dimensions[0] / 0.5, dimensions[0] / 0.5],
+ )
+ visual_objects.append(len(self.renderer.get_visual_objects()) - 1)
+ link_ids.append(link_id)
elif type == p.GEOM_CAPSULE or type == p.GEOM_CYLINDER:
- filename = os.path.join(igibson.assets_path, "models/mjcf_primitives/cylinder16.obj")
- dimensions = [dimensions[1] / 0.5, dimensions[1] / 0.5, dimensions[0]]
- if not os.path.exists(filename):
- logging.info(
- "Cylinder mesh file cannot be found in the assets. Consider removing the assets folder and downloading the newest version using download_assets(). Using a cube for backcompatibility"
- )
- filename = os.path.join(igibson.assets_path, "models/mjcf_primitives/cube.obj")
- dimensions = [dimensions[0] / 0.5, dimensions[0] / 0.5, dimensions[0] / 0.5]
- elif type == p.GEOM_BOX:
filename = os.path.join(igibson.assets_path, "models/mjcf_primitives/cube.obj")
- elif type == p.GEOM_PLANE:
+ self.renderer.load_object(
+ filename,
+ transform_orn=rel_orn,
+ transform_pos=rel_pos,
+ input_kd=color[:3],
+ scale=[dimensions[1] / 0.5, dimensions[1] / 0.5, dimensions[0]],
+ )
+ visual_objects.append(len(self.renderer.get_visual_objects()) - 1)
+ link_ids.append(link_id)
+ elif type == p.GEOM_BOX:
filename = os.path.join(igibson.assets_path, "models/mjcf_primitives/cube.obj")
- dimensions = [100, 100, 0.01]
-
- # Always load overwrite material
- if overwrite_material is not None:
- if isinstance(overwrite_material, RandomizedMaterial):
- self.renderer.load_randomized_material(overwrite_material, texture_scale)
- elif isinstance(overwrite_material, ProceduralMaterial):
- self.renderer.load_procedural_material(overwrite_material, texture_scale)
-
- # Load the visual object if it doesn't already exist.
- if (filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn)) not in self.visual_objects.keys():
self.renderer.load_object(
filename,
transform_orn=rel_orn,
transform_pos=rel_pos,
input_kd=color[:3],
scale=np.array(dimensions),
- texture_scale=texture_scale,
- overwrite_material=overwrite_material,
- )
- self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))] = (
- len(self.renderer.visual_objects) - 1
)
+ visual_objects.append(len(self.renderer.get_visual_objects()) - 1)
+ link_ids.append(link_id)
- # Keep track of the objects we just loaded.
- visual_objects.append(self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))])
- link_ids.append(link_id)
-
- # Keep track of the positions.
- if link_id == PYBULLET_BASE_LINK_INDEX:
- pos, orn = p.getBasePositionAndOrientation(body_id)
+ if link_id == -1:
+ pos, orn = p.getBasePositionAndOrientation(id)
else:
- pos, orn = p.getLinkState(body_id, link_id)[:2]
+ pos, orn = p.getLinkState(id, link_id)[:2]
poses_rot.append(np.ascontiguousarray(quat2rotmat(xyzw2wxyz(orn))))
poses_trans.append(np.ascontiguousarray(xyz2mat(pos)))
- # Finally, create and add the instance group for this object.
- self.renderer.add_instance_group(
+ self.renderer.add_robot(
object_ids=visual_objects,
link_ids=link_ids,
- pybullet_uuid=body_id,
- ig_object=obj,
+ pybullet_uuid=ids[0],
class_id=class_id,
- poses_trans=poses_trans,
poses_rot=poses_rot,
- softbody=softbody,
+ poses_trans=poses_trans,
dynamic=True,
- use_pbr=use_pbr,
- use_pbr_mapping=use_pbr_mapping,
- shadow_caster=shadow_caster,
+ robot=robot,
+ )
+
+ for state in robot.states.values():
+ state.initialize(self)
+
+ return ids
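+
+ # Usage sketch (illustrative only, not part of this patch), with constructor
+ # arguments assumed:
+ #
+ #     robot = Turtlebot(config)  # any BaseRobot subclass
+ #     robot_ids = s.import_robot(robot)
+ #
+ # The robot is added to the renderer via add_robot and its object states are
+ # initialized against this simulator.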
+
+ def add_normal_text(
+ self,
+ text_data="PLACEHOLDER: PLEASE REPLACE!",
+ font_name="OpenSans",
+ font_style="Regular",
+ font_size=48,
+ color=[0, 0, 0],
+ pos=[0, 100],
+ size=[20, 20],
+ scale=1.0,
+ background_color=None,
+ ):
+ """
+ Creates a Text object to be rendered to a non-VR screen. Returns the text object to the caller,
+ so various settings can be changed - e.g. text content, position, scale, etc.
+ :param text_data: starting text to display (can be changed at a later time by set_text)
+ :param font_name: name of font to render - same as font folder in iGibson assets
+ :param font_style: style of font - one of [regular, italic, bold]
+ :param font_size: size of font to render
+ :param color: [r, g, b] color
+ :param pos: [x, y] position of top-left corner of text box, in percentage across screen
+ :param size: [w, h] size of text box in percentage across screen-space axes
+ :param scale: scale factor for resizing text
+ :param background_color: color of the background in form [r, g, b, a] - background will only appear if this is not None
+ """
+ # Note: For pos/size - (0,0) is bottom-left and (100, 100) is top-right
+ # Calculate pixel positions for text
+ pixel_pos = [int(pos[0] / 100.0 * self.renderer.width), int(pos[1] / 100.0 * self.renderer.height)]
+ pixel_size = [int(size[0] / 100.0 * self.renderer.width), int(size[1] / 100.0 * self.renderer.height)]
+ return self.renderer.add_text(
+ text_data=text_data,
+ font_name=font_name,
+ font_style=font_style,
+ font_size=font_size,
+ color=color,
+ pixel_pos=pixel_pos,
+ pixel_size=pixel_size,
+ scale=scale,
+ background_color=background_color,
+ render_to_tex=False,
)
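+
+ # Usage sketch (illustrative only, not part of this patch): positions and
+ # sizes are percentages of the render window, e.g. a small label near the
+ # top-left corner:
+ #
+ #     label = s.add_normal_text(text_data="hello", pos=[0, 100], size=[20, 10],
+ #                               background_color=[1, 1, 1, 0.8])
+ #     label.set_text("updated")  # content can be changed later via set_text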
- # Add the instance onto the object as well.
- # TODO: Remove condition and/or migrate to owning classes.
- if hasattr(obj, "renderer_instances"):
- obj.renderer_instances.append(self.renderer.instances[-1])
+ def add_vr_overlay_text(
+ self,
+ text_data="PLACEHOLDER: PLEASE REPLACE!",
+ font_name="OpenSans",
+ font_style="Regular",
+ font_size=48,
+ color=[0, 0, 0],
+ pos=[20, 80],
+ size=[70, 80],
+ scale=1.0,
+ background_color=[1, 1, 1, 0.8],
+ ):
+ """
+ Creates Text for use in a VR overlay. Returns the text object to the caller,
+ so various settings can be changed - e.g. text content, position, scale, etc.
+ :param text_data: starting text to display (can be changed at a later time by set_text)
+ :param font_name: name of font to render - same as font folder in iGibson assets
+ :param font_style: style of font - one of [regular, italic, bold]
+ :param font_size: size of font to render
+ :param color: [r, g, b] color
+ :param pos: [x, y] position of top-left corner of text box, in percentage across screen
+ :param size: [w, h] size of text box in percentage across screen-space axes
+ :param scale: scale factor for resizing text
+ :param background_color: color of the background in form [r, g, b, a] - default is semi-transparent white so text is easy to read in VR
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+ if not self.vr_overlay_initialized:
+ # This function automatically creates a VR text overlay the first time text is added
+ self.renderer.gen_vr_hud()
+ self.vr_overlay_initialized = True
+
+ # Note: For pos/size - (0,0) is bottom-left and (100, 100) is top-right
+ # Calculate pixel positions for text
+ pixel_pos = [int(pos[0] / 100.0 * self.renderer.width), int(pos[1] / 100.0 * self.renderer.height)]
+ pixel_size = [int(size[0] / 100.0 * self.renderer.width), int(size[1] / 100.0 * self.renderer.height)]
+ return self.renderer.add_text(
+ text_data=text_data,
+ font_name=font_name,
+ font_style=font_style,
+ font_size=font_size,
+ color=color,
+ pixel_pos=pixel_pos,
+ pixel_size=pixel_size,
+ scale=scale,
+ background_color=background_color,
+ render_to_tex=True,
+ )
+
+ def add_overlay_image(self, image_fpath, width=1, pos=[0, 0, -1]):
+ """
+ Add an image with a given file path to the VR overlay. This image will be displayed
+ in addition to any text that the users wishes to display. This function returns a handle
+ to the VrStaticImageOverlay, so the user can display/hide it at will.
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+ return self.renderer.gen_static_overlay(image_fpath, width=width, pos=pos)
+
+ def set_hud_show_state(self, show_state):
+ """
+ Shows/hides the main VR HUD.
+ :param show_state: whether to show HUD or not
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+ self.renderer.vr_hud.set_overlay_show_state(show_state)
+
+ def get_hud_show_state(self):
+ """
+ Returns the show state of the main VR HUD.
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+ return self.renderer.vr_hud.get_overlay_show_state()
def _non_physics_step(self):
"""
@@ -443,56 +1045,416 @@ def _non_physics_step(self):
for obj in self.scene.get_objects_with_state(state_type):
obj.states[state_type].update()
- # Step the object procedural materials based on the updated object states.
+ # Step the object procedural materials based on the updated object states
for obj in self.scene.get_objects():
if hasattr(obj, "procedural_material") and obj.procedural_material is not None:
obj.procedural_material.update()
- def step(self):
+ def step_vr(self, print_stats=False):
"""
- Step the simulation at self.render_timestep and update positions in renderer.
+ Step the simulation when using VR. Order of function calls:
+ 1) Simulate physics
+ 2) Render frame
+ 3) Submit rendered frame to VR compositor
+ 4) Update VR data for use in the next frame
"""
+ assert (
+ self.scene is not None
+ ), "A scene must be imported before running the simulator. Use EmptyScene for an empty scene."
+
+ # Calculate time outside of step
+ outside_step_dur = 0
+ if self.frame_end_time is not None:
+ outside_step_dur = time.perf_counter() - self.frame_end_time
+ # Simulate Physics in PyBullet
+ physics_start_time = time.perf_counter()
for _ in range(self.physics_timestep_num):
p.stepSimulation()
+ physics_dur = time.perf_counter() - physics_start_time
+ non_physics_start_time = time.perf_counter()
self._non_physics_step()
+ non_physics_dur = time.perf_counter() - non_physics_start_time
+
+ # Sync PyBullet bodies to renderer and then render to Viewer
+ render_start_time = time.perf_counter()
self.sync()
+ render_dur = time.perf_counter() - render_start_time
+
+ # Sleep until last possible Vsync
+ pre_sleep_dur = outside_step_dur + physics_dur + non_physics_dur + render_dur
+ sleep_start_time = time.perf_counter()
+ if pre_sleep_dur < self.non_block_frame_time:
+ sleep(self.non_block_frame_time - pre_sleep_dur)
+ sleep_dur = time.perf_counter() - sleep_start_time
+
+ # Update VR compositor and VR data
+ vr_system_start = time.perf_counter()
+ # First sync VR compositor - this is where Oculus blocks (as opposed to Vive, which blocks in update_vr_data)
+ self.sync_vr_compositor()
+ # Note: this should only be called once per frame - use get_vr_events to read the event data list in
+ # subsequent read operations
+ self.poll_vr_events()
+ # This is necessary to fix the eye tracking value for the current frame, since it is multi-threaded
+ self.fix_eye_tracking_value()
+ # Move user to their starting location
+ self.perform_vr_start_pos_move()
+ # Update VR data and wait until 3ms before the next vsync
+ self.renderer.update_vr_data()
+ # Update VR system data - eg. offsets, haptics, etc.
+ self.vr_system_update()
+ vr_system_dur = time.perf_counter() - vr_system_start
+
+ # Calculate final frame duration
+ # Clamp to at least 1 ms so the FPS calculation stays finite (capped at 1000 fps)
+ frame_dur = max(1e-3, pre_sleep_dur + sleep_dur + vr_system_dur)
+
+ # Set variables for data saving and replay
+ self.last_physics_timestep = physics_dur
+ self.last_render_timestep = render_dur
+ self.last_frame_dur = frame_dur
+
+ if print_stats:
+ print("Frame number {} statistics (ms)".format(self.frame_count))
+ print("Total out-of-step duration: {}".format(outside_step_dur * 1000))
+ print("Total physics duration: {}".format(physics_dur * 1000))
+ print("Total non-physics duration: {}".format(non_physics_dur * 1000))
+ print("Total render duration: {}".format(render_dur * 1000))
+ print("Total sleep duration: {}".format(sleep_dur * 1000))
+ print("Total VR system duration: {}".format(vr_system_dur * 1000))
+ print("Total frame duration: {} and fps: {}".format(frame_dur * 1000, 1 / frame_dur))
+ print(
+ "Realtime factor: {}".format(round((self.physics_timestep_num * self.physics_timestep) / frame_dur, 3))
+ )
+ print("-------------------------")
+
self.frame_count += 1
+ self.frame_end_time = time.perf_counter()
- def sync(self, force_sync=False):
+ def step(self, print_stats=False, save_video=False, task_name=""):
+ """
+ Step the simulation at self.render_timestep and update positions in renderer
"""
- Update positions in renderer without stepping the simulation. Usually used in the reset() function.
+ # Call separate step function for VR
+ if self.can_access_vr_context:
+ self.step_vr(print_stats=print_stats)
+ return
+
+ for _ in range(self.physics_timestep_num):
+ p.stepSimulation()
+
+ self._non_physics_step()
+ self.sync(save_video=save_video, task_name=task_name)
+ self.frame_count += 1
- :param force_sync: whether to force sync the objects in renderer
+ def sync(self, force_sync=False, save_video=False, task_name=""):
+ """
+ Update positions in renderer without stepping the simulation. Usually used in the reset() function
"""
self.body_links_awake = 0
for instance in self.renderer.instances:
if instance.dynamic:
- self.body_links_awake += self.update_position(instance, force_sync=force_sync or self.first_sync)
- if self.viewer is not None:
- self.viewer.update()
+ self.body_links_awake += self.update_position(instance, force_sync=force_sync)
+ if (self.use_ig_renderer or self.use_vr_renderer or self.use_simple_viewer) and self.viewer is not None:
+ self.viewer.update(save_video=save_video, task_name=task_name)
if self.first_sync:
self.first_sync = False
+ def vr_system_update(self):
+ """
+ Updates the VR system for a single frame. This includes moving the vr offset,
+ adjusting the user's height based on button input, and triggering haptics.
+ """
+ # Update VR offset using appropriate controller
+ if self.vr_settings.touchpad_movement:
+ vr_offset_device = "{}_controller".format(self.vr_settings.movement_controller)
+ is_valid, _, _ = self.get_data_for_vr_device(vr_offset_device)
+ if is_valid:
+ _, touch_x, touch_y = self.get_button_data_for_controller(vr_offset_device)
+ new_offset = calc_offset(
+ self, touch_x, touch_y, self.vr_settings.movement_speed, self.vr_settings.relative_movement_device
+ )
+ self.set_vr_offset(new_offset)
+
+ # Adjust user height based on y-axis (vertical direction) touchpad input
+ vr_height_device = "left_controller" if self.vr_settings.movement_controller == "right" else "right_controller"
+ is_height_valid, _, _ = self.get_data_for_vr_device(vr_height_device)
+ if is_height_valid:
+ curr_offset = self.get_vr_offset()
+ hmd_height = self.get_hmd_world_pos()[2]
+ _, _, height_y = self.get_button_data_for_controller(vr_height_device)
+ if height_y < -0.7:
+ vr_z_offset = -0.01
+ if hmd_height + curr_offset[2] + vr_z_offset >= self.vr_settings.height_bounds[0]:
+ self.set_vr_offset([curr_offset[0], curr_offset[1], curr_offset[2] + vr_z_offset])
+ elif height_y > 0.7:
+ vr_z_offset = 0.01
+ if hmd_height + curr_offset[2] + vr_z_offset <= self.vr_settings.height_bounds[1]:
+ self.set_vr_offset([curr_offset[0], curr_offset[1], curr_offset[2] + vr_z_offset])
+
+ # Update haptics for body and hands
+ if self.main_vr_robot:
+ vr_body_id = self.main_vr_robot.parts["body"].body_id
+ vr_hands = [
+ ("left_controller", self.main_vr_robot.parts["left_hand"]),
+ ("right_controller", self.main_vr_robot.parts["right_hand"]),
+ ]
+
+ # Check for body haptics
+ wall_ids = self.get_category_ids("walls")
+ for c_info in p.getContactPoints(vr_body_id):
+ if wall_ids and (c_info[1] in wall_ids or c_info[2] in wall_ids):
+ for controller in ["left_controller", "right_controller"]:
+ is_valid, _, _ = self.get_data_for_vr_device(controller)
+ if is_valid:
+ # Use 90% strength for body to warn user of collision with wall
+ self.trigger_haptic_pulse(controller, 0.9)
+
+ # Check for hand haptics
+ for hand_device, hand_obj in vr_hands:
+ is_valid, _, _ = self.get_data_for_vr_device(hand_device)
+ if is_valid:
+ if len(p.getContactPoints(hand_obj.body_id)) > 0 or (
+ hasattr(hand_obj, "object_in_hand") and hand_obj.object_in_hand
+ ):
+ # Only use 30% strength for normal collisions, to help add realism to the experience
+ self.trigger_haptic_pulse(hand_device, 0.3)
+
+ def register_main_vr_robot(self, vr_robot):
+ """
+ Register the robot representing the VR user.
+ """
+ self.main_vr_robot = vr_robot
+
+ def import_behavior_robot(self, bvr_robot):
+ """
+ Import registered behavior robot into the simulator.
+ """
+ assert isinstance(bvr_robot, BehaviorRobot), "import_robot can only be called with BaseRobot"
+ self.robots.append(bvr_robot)
+ for part_name, part_obj in bvr_robot.parts.items():
+ self.import_object(part_obj, use_pbr=False, use_pbr_mapping=False, shadow_caster=True)
+ if bvr_robot.use_ghost_hands and part_name in ["left_hand", "right_hand"]:
+ # Ghost hands don't cast shadows
+ self.import_object(part_obj.ghost_hand, use_pbr=False, use_pbr_mapping=False, shadow_caster=False)
+ if part_name == "eye":
+ # BREye doesn't cast shadows either
+ self.import_object(
+ part_obj.head_visual_marker, use_pbr=False, use_pbr_mapping=False, shadow_caster=False
+ )
+
+ def gen_vr_data(self):
+ """
+ Generates a VrData object containing all of the data required to describe the VR system in the current frame.
+ This data is used to power the BehaviorRobot each frame.
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("Unable to get VR data for current frame since VR system is not being used!")
+
+ v = dict()
+ for device in VR_DEVICES:
+ is_valid, trans, rot = self.get_data_for_vr_device(device)
+ device_data = [is_valid, trans.tolist(), rot.tolist()]
+ device_data.extend(self.get_device_coordinate_system(device))
+ v[device] = device_data
+ if device in VR_CONTROLLERS:
+ v["{}_button".format(device)] = self.get_button_data_for_controller(device)
+
+ # Store final rotations of hands, with model rotation applied
+ for hand in ["right", "left"]:
+ # Base rotation quaternion
+ base_rot = self.main_vr_robot.parts["{}_hand".format(hand)].base_rot
+ # Raw rotation of controller
+ controller_rot = v["{}_controller".format(hand)][2]
+ # Use a dummy translation to calculate the final rotation
+ final_rot = p.multiplyTransforms([0, 0, 0], controller_rot, [0, 0, 0], base_rot)[1]
+ v["{}_controller".format(hand)].append(final_rot)
+
+ is_valid, torso_trans, torso_rot = self.get_data_for_vr_tracker(self.vr_settings.torso_tracker_serial)
+ v["torso_tracker"] = [is_valid, torso_trans, torso_rot]
+ v["eye_data"] = self.get_eye_tracking_data()
+ v["event_data"] = self.get_vr_events()
+ reset_actions = []
+ for controller in VR_CONTROLLERS:
+ reset_actions.append(self.query_vr_event(controller, "reset_agent"))
+ v["reset_actions"] = reset_actions
+ v["vr_positions"] = [self.get_vr_pos().tolist(), list(self.get_vr_offset())]
+
+ return VrData(v)
+
+ def gen_vr_robot_action(self):
+ """
+ Generates an action for the BehaviorRobot to perform based on VrData collected this frame.
+
+ Action space (all non-normalized values that will be clipped if they are too large)
+ * See BehaviorRobot.py for details on the clipping thresholds for
+ Body:
+ - 6DOF pose delta - relative to body frame from previous frame
+ Eye:
+ - 6DOF pose delta - relative to body frame (where the body will be after applying this frame's action)
+ Left hand, right hand (in that order):
+ - 6DOF pose delta - relative to body frame (same as above)
+ - Trigger fraction delta
+ - Action reset value
+
+ Total size: 28
+ """
+ # Actions are stored as 1D numpy array
+ action = np.zeros((28,))
+
+ # Get VrData for the current frame
+ v = self.gen_vr_data()
+
+ # Update body action space
+ hmd_is_valid, hmd_pos, hmd_orn, hmd_r = v.query("hmd")[:4]
+ torso_is_valid, torso_pos, torso_orn = v.query("torso_tracker")
+ vr_body = self.main_vr_robot.parts["body"]
+ prev_body_pos, prev_body_orn = vr_body.get_position_orientation()
+ inv_prev_body_pos, inv_prev_body_orn = p.invertTransform(prev_body_pos, prev_body_orn)
+
+ if self.vr_settings.using_tracked_body:
+ if torso_is_valid:
+ des_body_pos, des_body_orn = torso_pos, torso_orn
+ else:
+ des_body_pos, des_body_orn = prev_body_pos, prev_body_orn
+ else:
+ if hmd_is_valid:
+ des_body_pos, des_body_orn = hmd_pos, p.getQuaternionFromEuler([0, 0, calc_z_rot_from_right(hmd_r)])
+ else:
+ des_body_pos, des_body_orn = prev_body_pos, prev_body_orn
+
+ body_delta_pos, body_delta_orn = p.multiplyTransforms(
+ inv_prev_body_pos, inv_prev_body_orn, des_body_pos, des_body_orn
+ )
+ action[:3] = np.array(body_delta_pos)
+ action[3:6] = np.array(p.getEulerFromQuaternion(body_delta_orn))
+
+ # Get new body position so we can calculate correct relative transforms for other VR objects
+ clipped_body_delta_pos, clipped_body_delta_orn = vr_body.clip_delta_pos_orn(action[:3], action[3:6])
+ clipped_body_delta_orn = p.getQuaternionFromEuler(clipped_body_delta_orn)
+ new_body_pos, new_body_orn = p.multiplyTransforms(
+ prev_body_pos, prev_body_orn, clipped_body_delta_pos, clipped_body_delta_orn
+ )
+ # Also calculate its inverse for further local transform calculations
+ inv_new_body_pos, inv_new_body_orn = p.invertTransform(new_body_pos, new_body_orn)
+
+ # Update action space for other VR objects
+ body_relative_parts = ["right", "left", "eye"]
+ for part_name in body_relative_parts:
+ vr_part = (
+ self.main_vr_robot.parts[part_name]
+ if part_name == "eye"
+ else self.main_vr_robot.parts["{}_hand".format(part_name)]
+ )
+
+ # Process local transform adjustments
+ prev_world_pos, prev_world_orn = vr_part.get_position_orientation()
+ prev_local_pos, prev_local_orn = vr_part.local_pos, vr_part.local_orn
+ _, inv_prev_local_orn = p.invertTransform(prev_local_pos, prev_local_orn)
+ if part_name == "eye":
+ valid, world_pos, world_orn = hmd_is_valid, hmd_pos, hmd_orn
+ else:
+ valid, world_pos, _ = v.query("{}_controller".format(part_name))[:3]
+ # Need rotation of the model so it will appear aligned with the physical controller in VR
+ world_orn = v.query("{}_controller".format(part_name))[6]
+
+ # Keep in same world position as last frame if controller/tracker data is not valid
+ if not valid:
+ world_pos, world_orn = prev_world_pos, prev_world_orn
+
+ # Get desired local position and orientation transforms
+ des_local_pos, des_local_orn = p.multiplyTransforms(
+ inv_new_body_pos, inv_new_body_orn, world_pos, world_orn
+ )
+
+ # Get the delta local orientation in the reference frame of the body
+ _, delta_local_orn = p.multiplyTransforms(
+ [0, 0, 0],
+ des_local_orn,
+ [0, 0, 0],
+ inv_prev_local_orn,
+ )
+ delta_local_orn = p.getEulerFromQuaternion(delta_local_orn)
+
+ # Get the delta local position in the reference frame of the body
+ delta_local_pos = np.array(des_local_pos) - np.array(prev_local_pos)
+
+ if part_name == "eye":
+ action[6:9] = np.array(delta_local_pos)
+ action[9:12] = np.array(delta_local_orn)
+ elif part_name == "left":
+ action[12:15] = np.array(delta_local_pos)
+ action[15:18] = np.array(delta_local_orn)
+ else:
+ action[20:23] = np.array(delta_local_pos)
+ action[23:26] = np.array(delta_local_orn)
+
+ # Process trigger fraction and reset for controllers
+ if part_name in ["right", "left"]:
+ prev_trig_frac = vr_part.trigger_fraction
+ if valid:
+ trig_frac = v.query("{}_controller_button".format(part_name))[0]
+ delta_trig_frac = trig_frac - prev_trig_frac
+ else:
+ delta_trig_frac = 0.0
+ if part_name == "left":
+ action[18] = delta_trig_frac
+ else:
+ action[26] = delta_trig_frac
+ # If we reset, action is 1, otherwise 0
+ reset_action = v.query("reset_actions")[0] if part_name == "left" else v.query("reset_actions")[1]
+ reset_action_val = 1.0 if reset_action else 0.0
+ if part_name == "left":
+ action[19] = reset_action_val
+ else:
+ action[27] = reset_action_val
+
+ return action
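+
+ # Index layout of the returned 28-dim action, as assembled above
+ # (summary only, not part of this patch):
+ #
+ #     action[0:6]    body 6-DOF pose delta
+ #     action[6:12]   eye 6-DOF pose delta
+ #     action[12:18]  left hand 6-DOF pose delta
+ #     action[18]     left trigger fraction delta
+ #     action[19]     left reset flag
+ #     action[20:26]  right hand 6-DOF pose delta
+ #     action[26]     right trigger fraction delta
+ #     action[27]     right reset flag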
+
+ def sync_vr_compositor(self):
+ """
+ Sync VR compositor.
+ """
+ self.renderer.vr_compositor_update()
+
+ def perform_vr_start_pos_move(self):
+ """
+ Sets the VR position on the first step iteration where the hmd tracking is valid. Not to be confused
+ with self.set_vr_start_pos, which simply records the desired start position before the simulator starts running.
+ """
+ # Update VR start position if it is not None and the hmd is valid
+ # This will keep checking until we can successfully set the start position
+ if self.vr_start_pos:
+ hmd_is_valid, _, _, _ = self.renderer.vrsys.getDataForVRDevice("hmd")
+ if hmd_is_valid:
+ offset_to_start = np.array(self.vr_start_pos) - self.get_hmd_world_pos()
+ if self.vr_height_offset is not None:
+ offset_to_start[2] = self.vr_height_offset
+ self.set_vr_offset(offset_to_start)
+ self.vr_start_pos = None
+
+ def fix_eye_tracking_value(self):
+ """
+ Calculates and fixes eye tracking data to its value during step(). This is necessary, since multiple
+ calls to get eye tracking data return different results, due to the SRAnipal multithreaded loop that
+ runs in parallel to the iGibson main thread
+ """
+ self.eye_tracking_data = self.renderer.vrsys.getEyeTrackingData()
+
def gen_assisted_grasping_categories(self):
"""
- Generate a list of categories that can be grasped using assisted grasping,
+ Generates the list of categories that can be grasped using assisted grasping,
using labels provided in average category specs file.
"""
- assisted_grasp_category_allow_list = set()
avg_category_spec = get_ig_avg_category_specs()
for k, v in avg_category_spec.items():
if v["enable_ag"]:
- assisted_grasp_category_allow_list.add(k)
- return assisted_grasp_category_allow_list
+ self.assist_grasp_category_allow_list.add(k)
def can_assisted_grasp(self, body_id, c_link):
"""
- Check whether an object with the given body_id can be grasped. This is done
+ Checks to see if an object with the given body_id can be grasped. This is done
by checking its category to see if it is in the allowlist.
-
- :param body_id: pybullet body id
- :param c_link: link index or -1 for the base
"""
if (
not hasattr(self.scene, "objects_by_id")
@@ -505,50 +1467,391 @@ def can_assisted_grasp(self, body_id, c_link):
else:
return self.scene.objects_by_id[body_id].category in self.assist_grasp_category_allow_list
+ def poll_vr_events(self):
+ """
+ Returns VR event data as list of lists.
+ List is empty if all events are invalid. Components of a single event:
+ controller: 0 (left_controller), 1 (right_controller)
+ button_idx: any valid idx in EVRButtonId enum in openvr.h header file
+ press: 0 (unpress), 1 (press)
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ self.vr_event_data = self.renderer.vrsys.pollVREvents()
+ # Enforce store_first_button_press_per_frame option, if user has enabled it
+ if self.vr_settings.store_only_first_event_per_button:
+ temp_event_data = []
+ # Make sure we only store the first (button, press) combo of each type
+ event_set = set()
+ for ev_data in self.vr_event_data:
+ controller, button_idx, _ = ev_data
+ key = (controller, button_idx)
+ if key not in event_set:
+ temp_event_data.append(ev_data)
+ event_set.add(key)
+ self.vr_event_data = temp_event_data[:]
+
+ return self.vr_event_data
+
+ def get_vr_events(self):
+ """
+ Returns the VR events processed by the simulator
+ """
+ return self.vr_event_data
+
+ def get_button_for_action(self, action):
+ """
+ Returns (button, state) tuple corresponding to an action
+ :param action: an action name listed in "action_button_map" dictionary for the current device in the vr_config.yml
+ """
+ return (
+ None
+ if action not in self.vr_settings.action_button_map
+ else tuple(self.vr_settings.action_button_map[action])
+ )
+
+ def query_vr_event(self, controller, action):
+ """
+ Queries system for a VR event, and returns true if that event happened this frame
+ :param controller: device to query for - can be left_controller or right_controller
+ :param action: an action name listed in "action_button_map" dictionary for the current device in the vr_config.yml
+ """
+ # Return false if any of input parameters are invalid
+ if (
+ controller not in ["left_controller", "right_controller"]
+ or action not in self.vr_settings.action_button_map.keys()
+ ):
+ return False
+
+ # Search through event list to try to find desired event
+ controller_id = 0 if controller == "left_controller" else 1
+ button_idx, press_id = self.vr_settings.action_button_map[action]
+ for ev_data in self.vr_event_data:
+ if controller_id == ev_data[0] and button_idx == ev_data[1] and press_id == ev_data[2]:
+ return True
+
+ # Return false if event was not found this frame
+ return False
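+
+ # Usage sketch (illustrative only, not part of this patch): poll_vr_events()
+ # is called once per frame inside step_vr(), so user code queries the cached
+ # events afterwards, e.g.
+ #
+ #     if s.query_vr_event("right_controller", "reset_agent"):
+ #         ...  # handle the reset action mapped in vr_config.yml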
+
+ def get_data_for_vr_device(self, device_name):
+ """
+ Call this after step - returns all VR device data for a specific device
+ Returns is_valid (indicating validity of data), translation and rotation in Gibson world space
+ :param device_name: can be hmd, left_controller or right_controller
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ # Use fourth variable in list to get actual hmd position in space
+ is_valid, translation, rotation, _ = self.renderer.vrsys.getDataForVRDevice(device_name)
+ if not is_valid:
+ translation = np.array([0, 0, 0])
+ rotation = np.array([0, 0, 0, 1])
+ return [is_valid, translation, rotation]
+
+ def get_data_for_vr_tracker(self, tracker_serial_number):
+ """
+ Returns the data for a tracker with a specific serial number. This number can be found
+ by looking in the SteamVR device information.
+ :param tracker_serial_number: the serial number of the tracker
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ if not tracker_serial_number:
+ return [False, [0, 0, 0], [0, 0, 0, 0]]
+
+ tracker_data = self.renderer.vrsys.getDataForVRTracker(tracker_serial_number)
+ # Set is_valid to false, and assume the user will check for invalid data
+ if not tracker_data:
+ return [False, np.array([0, 0, 0]), np.array([0, 0, 0, 1])]
+
+ is_valid, translation, rotation = tracker_data
+ return [is_valid, translation, rotation]
+
+ def get_hmd_world_pos(self):
+ """
+ Get world position of HMD without offset
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ _, _, _, hmd_world_pos = self.renderer.vrsys.getDataForVRDevice("hmd")
+ return hmd_world_pos
+
+ def get_button_data_for_controller(self, controller_name):
+ """
+ Call this after getDataForVRDevice - returns analog data for a specific controller
+ Returns trigger_fraction, touchpad finger position x, touchpad finger position y
+ Data is only valid if isValid is true from previous call to getDataForVRDevice
+ Trigger data: 1 (closed) <------> 0 (open)
+ Analog data: X: -1 (left) <-----> 1 (right) and Y: -1 (bottom) <------> 1 (top)
+ :param controller_name: one of left_controller or right_controller
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ # Test for validity when acquiring button data
+ if self.get_data_for_vr_device(controller_name)[0]:
+ trigger_fraction, touch_x, touch_y = self.renderer.vrsys.getButtonDataForController(controller_name)
+ else:
+ trigger_fraction, touch_x, touch_y = 0.0, 0.0, 0.0
+ return [trigger_fraction, touch_x, touch_y]
+
+ def get_scroll_input(self):
+ """
+ Gets scroll input. This uses the non-movement-controller, and determines whether
+ the user wants to scroll by testing if they have pressed the touchpad, while keeping
+ their finger on the left/right of the pad. Returns 1 for up, 0 for down, and -1 for no scroll.
+ """
+ mov_controller = self.vr_settings.movement_controller
+ other_controller = "right" if mov_controller == "left" else "left"
+ other_controller = "{}_controller".format(other_controller)
+ # Data indicating whether the user has pressed the left or right side of the touchpad
+ _, touch_x, _ = self.renderer.vrsys.getButtonDataForController(other_controller)
+ # Only register touches in the extreme regions of the x axis
+ if touch_x > 0.7 and touch_x <= 1.0:
+ return 1
+ elif touch_x < -0.7 and touch_x >= -1.0:
+ return 0
+ else:
+ return -1
+
+ def get_eye_tracking_data(self):
+ """
+ Returns eye tracking data as list of lists. Order: is_valid, gaze origin, gaze direction, gaze point,
+ left pupil diameter, right pupil diameter (both in millimeters)
+ Call after getDataForVRDevice, to guarantee that latest HMD transform has been acquired
+ """
+ is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter = self.eye_tracking_data
+ # Set other values to 0 to avoid very small/large floating point numbers
+ if not is_valid:
+ return [False, [0, 0, 0], [0, 0, 0], 0, 0]
+ else:
+ return [is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter]
+
+ def set_vr_start_pos(self, start_pos=None, vr_height_offset=None):
+ """
+ Sets the starting position of the VR system in iGibson space
+ :param start_pos: position to start VR system at
+ :param vr_height_offset: starting height offset. If None, uses absolute height from start_pos
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ # The VR headset will actually be set to this position during the first frame.
+ # This is because we need to know where the headset is in space when it is first picked
+ # up to set the initial offset correctly.
+ self.vr_start_pos = start_pos
+ # This value can be set to specify a height offset instead of an absolute height.
+ # We might want to adjust the height of the camera based on the height of the person using VR,
+ # but still offset this height. When this option is not None it offsets the height by the amount
+ # specified instead of overwriting the VR system height output.
+ self.vr_height_offset = vr_height_offset
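+
+ # Usage sketch (illustrative only, not part of this patch): record a start
+ # position before stepping; the offset is applied on the first frame with a
+ # valid HMD pose (see perform_vr_start_pos_move):
+ #
+ #     s.set_vr_start_pos([0.5, 0.0, 0.0], vr_height_offset=-0.1)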
+
+ def set_vr_pos(self, pos=None, keep_height=False):
+ """
+ Sets the world position of the VR system in iGibson space
+ :param pos: position to set VR system to
+ :param keep_height: whether the current VR height should be kept
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ offset_to_pos = np.array(pos) - self.get_hmd_world_pos()
+ if keep_height:
+ curr_offset_z = self.get_vr_offset()[2]
+ self.set_vr_offset([offset_to_pos[0], offset_to_pos[1], curr_offset_z])
+ else:
+ self.set_vr_offset(offset_to_pos)
+
+ def get_vr_pos(self):
+ """
+ Gets the world position of the VR system in iGibson space.
+ """
+ return self.get_hmd_world_pos() + np.array(self.get_vr_offset())
+
+ def set_vr_offset(self, pos=None):
+ """
+ Sets the translational offset of the VR system (HMD, left controller, right controller) in world space coordinates.
+ Can be used for many things, including adjusting height and teleportation-based movement
+ :param pos: must be a list of three floats, corresponding to x, y, z in Gibson coordinate space
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ self.renderer.vrsys.setVROffset(-pos[1], pos[2], -pos[0])
+
+ def get_vr_offset(self):
+ """
+ Gets the current VR offset vector in list form: x, y, z (in iGibson coordinates)
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ x, y, z = self.renderer.vrsys.getVROffset()
+ return [x, y, z]
+
+ def get_device_coordinate_system(self, device):
+ """
+ Gets the direction vectors representing the device's coordinate system in list form: x, y, z (in Gibson coordinates)
+ List contains "right", "up" and "forward" vectors in that order
+ :param device: can be one of "hmd", "left_controller" or "right_controller"
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+
+ vec_list = []
+
+ coordinate_sys = self.renderer.vrsys.getDeviceCoordinateSystem(device)
+ for dir_vec in coordinate_sys:
+ vec_list.append(dir_vec)
+
+ return vec_list
+
+ def trigger_haptic_pulse(self, device, strength):
+ """
+ Triggers a haptic pulse of the specified strength (0 is weakest, 1 is strongest)
+ :param device: device to trigger haptic for - can be any one of [left_controller, right_controller]
+ :param strength: strength of haptic pulse (0 is weakest, 1 is strongest)
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+ assert device in ["left_controller", "right_controller"]
+
+ self.renderer.vrsys.triggerHapticPulseForDevice(device, int(self.max_haptic_duration * strength))
+
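A small worked example of the strength-to-duration conversion above, assuming max_haptic_duration is 4000 as in the VR simulator defined later in this diff:

# Hedged example - strength 0.3 maps to a pulse duration of int(4000 * 0.3) == 1200.
sim.trigger_haptic_pulse("right_controller", 0.3)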
def set_hidden_state(self, obj, hide=True):
"""
- Set the hidden state of an object to be either hidden or not hidden.
- The object passed in must inherent from Object at the top level.
+ Sets the hidden state of an object to be either hidden or not hidden.
+ The object passed in must inherit from Object at the top level
- Note: this function must be called after step() in the rendering loop.
+ Note: this function must be called after step() in the rendering loop
Note 2: this function only works with the optimized renderer - please use the renderer hidden
- list to hide objects in the non-optimized renderer.
-
- :param obj: an object to set the hidden state
- :param hide: the hidden state to set
+ list to hide objects in the non-optimized renderer
"""
# Find instance corresponding to this id in the renderer
for instance in self.renderer.instances:
- if obj.get_body_id() == instance.pybullet_uuid:
+ if obj.body_id == instance.pybullet_uuid:
instance.hidden = hide
self.renderer.update_hidden_highlight_state([instance])
return
- @staticmethod
- def update_position(instance, force_sync=False):
+ def set_hud_state(self, state):
+ """
+ Sets the state of the VR HUD (heads-up display)
+ :param state: one of 'show' or 'hide'
+ """
+ if not self.can_access_vr_context:
+ raise RuntimeError("ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!")
+ if self.renderer.vr_hud:
+ self.renderer.vr_hud.set_overlay_state(state)
+
+ def get_hidden_state(self, obj):
+ """
+ Returns the current hidden state of the object - hidden (True) or not hidden (False)
+ """
+ for instance in self.renderer.instances:
+ if obj.body_id == instance.pybullet_uuid:
+ return instance.hidden
+
+ def get_category_ids(self, category_name):
+ """
+ Gets ids for all instances of a specific category (floors, walls, etc.) in a scene
"""
- Update the position of an object or a robot in renderer.
+ if not hasattr(self.scene, "objects_by_id"):
+ return []
+ return [
+ body_id
+ for body_id in self.objects
+ if (
+ body_id in self.scene.objects_by_id.keys()
+ and hasattr(self.scene.objects_by_id[body_id], "category")
+ and self.scene.objects_by_id[body_id].category == category_name
+ )
+ ]
- :param instance: an instance in the renderer
- :param force_sync: whether to force sync the object
+ def update_position(self, instance, force_sync=False):
+ """
+ Update the position of an object or a robot in the renderer.
+ :param instance: instance in the renderer
+ :param force_sync: whether to force sync the object
"""
body_links_awake = 0
- for j, link_id in enumerate(instance.link_ids):
- dynamics_info = p.getDynamicsInfo(instance.pybullet_uuid, link_id)
- if len(dynamics_info) == 13 and not force_sync:
+ if isinstance(instance, Instance):
+ dynamics_info = p.getDynamicsInfo(instance.pybullet_uuid, -1)
+ if len(dynamics_info) == 13 and not self.first_sync and not force_sync:
activation_state = dynamics_info[12]
else:
activation_state = PyBulletSleepState.AWAKE
if activation_state != PyBulletSleepState.AWAKE:
- continue
+ return body_links_awake
+ # pos and orn of the inertial frame of the base link,
+ # instead of the base link frame
+ pos, orn = p.getBasePositionAndOrientation(instance.pybullet_uuid)
+
+ # Need to convert to the base link frame because that is
+ # what our own renderer keeps track of
+ # Based on pybullet documentation:
+ # urdfLinkFrame = comLinkFrame * localInertialFrame.inverse().
+
+ instance.set_position(pos)
+ instance.set_rotation(quat2rotmat(xyzw2wxyz(orn)))
+ body_links_awake += 1
+ elif isinstance(instance, InstanceGroup):
+ for j, link_id in enumerate(instance.link_ids):
+ if link_id == -1:
+ dynamics_info = p.getDynamicsInfo(instance.pybullet_uuid, -1)
+ if len(dynamics_info) == 13 and not self.first_sync:
+ activation_state = dynamics_info[12]
+ else:
+ activation_state = PyBulletSleepState.AWAKE
+
+ if activation_state != PyBulletSleepState.AWAKE:
+ continue
+ # same conversion is needed as above
+ pos, orn = p.getBasePositionAndOrientation(instance.pybullet_uuid)
- if link_id == PYBULLET_BASE_LINK_INDEX:
- pos, orn = p.getBasePositionAndOrientation(instance.pybullet_uuid)
- else:
- pos, orn = p.getLinkState(instance.pybullet_uuid, link_id)[:2]
+ else:
+ dynamics_info = p.getDynamicsInfo(instance.pybullet_uuid, link_id)
- instance.set_position_for_part(xyz2mat(pos), j)
- instance.set_rotation_for_part(quat2rotmat(xyzw2wxyz(orn)), j)
- body_links_awake += 1
+ if len(dynamics_info) == 13 and not self.first_sync:
+ activation_state = dynamics_info[12]
+ else:
+ activation_state = PyBulletSleepState.AWAKE
+
+ if activation_state != PyBulletSleepState.AWAKE:
+ continue
+
+ pos, orn = p.getLinkState(instance.pybullet_uuid, link_id)[:2]
+
+ instance.set_position_for_part(xyz2mat(pos), j)
+ instance.set_rotation_for_part(quat2rotmat(xyzw2wxyz(orn)), j)
+ body_links_awake += 1
return body_links_awake
+
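The base-link frame conversion referenced in the comments above (urdfLinkFrame = comLinkFrame * localInertialFrame.inverse()) can be written out explicitly; below is a hedged sketch using pybullet directly, with body_id as a placeholder:

# Hedged sketch - recover the URDF base-link frame from the COM frame pybullet reports.
com_pos, com_orn = p.getBasePositionAndOrientation(body_id)
inertial_pos, inertial_orn = p.getDynamicsInfo(body_id, -1)[3:5]
inv_pos, inv_orn = p.invertTransform(inertial_pos, inertial_orn)
link_pos, link_orn = p.multiplyTransforms(com_pos, com_orn, inv_pos, inv_orn)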
+ def isconnected(self):
+ """
+ :return: whether the pybullet client is still connected
+ """
+ return p.getConnectionInfo(self.cid)["isConnected"]
+
+ def disconnect(self):
+ """
+ Clean up the simulator
+ """
+ if self.isconnected():
+ # print("******************PyBullet Logging Information:")
+ p.resetSimulation(physicsClientId=self.cid)
+ p.disconnect(self.cid)
+ # print("PyBullet Logging Information******************")
+ self.renderer.release()
+
+ def disconnect_pybullet(self):
+ """
+ Disconnects only pybullet - used for multi-user VR
+ """
+ if self.isconnected():
+ p.resetSimulation(physicsClientId=self.cid)
+ p.disconnect(self.cid)
diff --git a/igibson/simulator_vr.py b/igibson/simulator_vr.py
deleted file mode 100644
index 0eb1f1dcf..000000000
--- a/igibson/simulator_vr.py
+++ /dev/null
@@ -1,758 +0,0 @@
-import ctypes
-import platform
-import time
-from time import sleep
-
-import numpy as np
-import pybullet as p
-
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.render.mesh_renderer.mesh_renderer_vr import MeshRendererVR, VrSettings
-from igibson.render.viewer import ViewerVR
-from igibson.simulator import Simulator
-from igibson.utils.vr_utils import VR_CONTROLLERS, VR_DEVICES, VrData, calc_offset, calc_z_rot_from_right
-
-
-class SimulatorVR(Simulator):
- """
- Simulator class is a wrapper of physics simulator (pybullet) and MeshRenderer, it loads objects into
- both pybullet and also MeshRenderer and syncs the pose of objects and robot parts.
- """
-
- def __init__(
- self,
- gravity=9.8,
- physics_timestep=1 / 120.0,
- render_timestep=1 / 30.0,
- solver_iterations=100,
- mode="vr",
- image_width=128,
- image_height=128,
- vertical_fov=90,
- device_idx=0,
- rendering_settings=MeshRendererSettings(),
- vr_settings=VrSettings(),
- use_pb_gui=False,
- ):
- """
- :param gravity: gravity on z direction.
- :param physics_timestep: timestep of physical simulation, p.stepSimulation()
- :param render_timestep: timestep of rendering, and Simulator.step() function
- :param solver_iterations: number of solver iterations to feed into pybullet, can be reduced to increase speed.
- pybullet default value is 50.
- :param use_variable_step_num: whether to use a fixed (1) or variable physics step number
- :param mode: choose mode from headless, headless_tensor, gui_interactive, gui_non_interactive
- :param image_width: width of the camera image
- :param image_height: height of the camera image
- :param vertical_fov: vertical field of view of the camera image in degrees
- :param device_idx: GPU device index to run rendering on
- :param rendering_settings: settings to use for mesh renderer
- :param vr_settings: settings to use for VR in simulator and MeshRendererVR
- :param use_pb_gui: concurrently display the interactive pybullet gui (for debugging)
- """
- if platform.system() == "Windows":
- # By default, windows does not provide ms level timing accuracy
- winmm = ctypes.WinDLL("winmm") # type: ignore
- winmm.timeBeginPeriod(1)
-
- # Blend highlight for VR overlay
- rendering_settings.blend_highlight = True
-
- # Starting position for the VR (default set to None if no starting position is specified by the user)
- self.vr_settings = vr_settings
- self.vr_overlay_initialized = False
- self.vr_start_pos = None
- self.max_haptic_duration = 4000
-
- # Duration of a vsync frame - assumes 90Hz refresh rate
- self.vsync_frame_dur = 11.11e-3
- # Timing variables for functions called outside of step() that also take up frame time
- self.frame_end_time = None
-
- # Variables for data saving and replay in VR
- self.last_physics_timestep = -1
- self.last_render_timestep = -1
- self.last_physics_step_num = -1
- self.last_frame_dur = -1
-
- super().__init__(
- gravity,
- physics_timestep,
- render_timestep,
- solver_iterations,
- mode,
- image_width,
- image_height,
- vertical_fov,
- device_idx,
- rendering_settings,
- use_pb_gui,
- )
-
- # Get expected number of vsync frames per iGibson frame. Note: currently assumes a 90Hz VR system
- self.vsync_frame_num = int(round(self.render_timestep / self.vsync_frame_dur))
-
- # Total amount of time we want non-blocking actions to take each frame
- # Leave a small amount of time before the last vsync, just in case we overrun
- self.non_block_frame_time = (self.vsync_frame_num - 1) * self.vsync_frame_dur + (
- 5e-3 if self.vr_settings.curr_device == "OCULUS" else 10e-3
- )
-
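As a worked example of the timing math above, with the default render_timestep of 1/30 s and the assumed 90 Hz refresh rate:

# Hedged arithmetic sketch, not part of the original code.
vsync_frame_dur = 11.11e-3                                             # one 90 Hz vsync interval
vsync_frame_num = int(round((1 / 30.0) / vsync_frame_dur))             # == 3
non_block_frame_time = (vsync_frame_num - 1) * vsync_frame_dur + 5e-3  # ~27.2 ms on OCULUS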
- def initialize_renderer(self):
- self.visual_objects = {}
- self.renderer = MeshRendererVR(
- rendering_settings=self.rendering_settings, vr_settings=self.vr_settings, simulator=self
- )
- self.viewer = ViewerVR(
- self.vr_settings.use_companion_window,
- frame_save_path=self.vr_settings.frame_save_path,
- renderer=self.renderer,
- )
-
- def add_vr_overlay_text(
- self,
- text_data="PLACEHOLDER: PLEASE REPLACE!",
- font_name="OpenSans",
- font_style="Regular",
- font_size=48,
- color=[0, 0, 0],
- pos=[20, 80],
- size=[70, 80],
- scale=1.0,
- background_color=[1, 1, 1, 0.8],
- ):
- """
- Creates Text for use in a VR overlay. Returns the text object to the caller,
- so various settings can be changed - eg. text content, position, scale, etc.
- :param text_data: starting text to display (can be changed at a later time by set_text)
- :param font_name: name of font to render - same as font folder in iGibson assets
- :param font_style: style of font - one of [regular, italic, bold]
- :param font_size: size of font to render
- :param color: [r, g, b] color
- :param pos: [x, y] position of top-left corner of text box, in percentage across screen
- :param size: [w, h] size of text box in percentage across screen-space axes
- :param scale: scale factor for resizing text
- :param background_color: color of the background in form [r, g, b, a] - default is semi-transparent white so text is easy to read in VR
- """
- if not self.vr_overlay_initialized:
- # This function automatically creates a VR text overlay the first time text is added
- self.renderer.gen_vr_hud()
- self.vr_overlay_initialized = True
-
- # Note: For pos/size - (0,0) is bottom-left and (100, 100) is top-right
- # Calculate pixel positions for text
- pixel_pos = [int(pos[0] / 100.0 * self.renderer.width), int(pos[1] / 100.0 * self.renderer.height)]
- pixel_size = [int(size[0] / 100.0 * self.renderer.width), int(size[1] / 100.0 * self.renderer.height)]
- return self.renderer.add_text(
- text_data=text_data,
- font_name=font_name,
- font_style=font_style,
- font_size=font_size,
- color=color,
- pixel_pos=pixel_pos,
- pixel_size=pixel_size,
- scale=scale,
- background_color=background_color,
- render_to_tex=True,
- )
-
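For example, with the defaults above and an assumed 1920x1080 render resolution, pos=[20, 80] maps to pixel_pos=[384, 864] and size=[70, 80] maps to pixel_size=[1344, 864]. A hedged usage sketch:

# Hedged example - create a HUD text element and update it later via set_text (mentioned above).
status_text = sim.add_vr_overlay_text(text_data="Collect the cans", pos=[20, 80], size=[70, 80])
status_text.set_text("Task complete!")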
- def add_overlay_image(self, image_fpath, width=1, pos=[0, 0, -1]):
- """
- Add an image with a given file path to the VR overlay. This image will be displayed
- in addition to any text that the users wishes to display. This function returns a handle
- to the VrStaticImageOverlay, so the user can display/hide it at will.
- """
- return self.renderer.gen_static_overlay(image_fpath, width=width, pos=pos)
-
- def set_hud_show_state(self, show_state):
- """
- Shows/hides the main VR HUD.
- :param show_state: whether to show HUD or not
- """
- self.renderer.vr_hud.set_overlay_show_state(show_state)
-
- def get_hud_show_state(self):
- """
- Returns the show state of the main VR HUD.
- """
- return self.renderer.vr_hud.get_overlay_show_state()
-
- def step_vr_system(self):
- # Update VR compositor and VR data
- vr_system_start = time.perf_counter()
- # First sync VR compositor - this is where Oculus blocks (as opposed to Vive, which blocks in update_vr_data)
- self.sync_vr_compositor()
- # Note: this should only be called once per frame - use get_vr_events to read the event data list in
- # subsequent read operations
- self.poll_vr_events()
- # This is necessary to fix the eye tracking value for the current frame, since it is multi-threaded
- self.fix_eye_tracking_value()
- # Move user to their starting location
- self.perform_vr_start_pos_move()
- # Update VR data and wait until 3ms before the next vsync
- self.renderer.update_vr_data()
- # Update VR system data - eg. offsets, haptics, etc.
- self.vr_system_update()
- vr_system_dur = time.perf_counter() - vr_system_start
- return vr_system_dur
-
- def step(self, print_stats=False):
- """
- Step the simulation when using VR. Order of function calls:
- 1) Simulate physics
- 2) Render frame
- 3) Submit rendered frame to VR compositor
- 4) Update VR data for use in the next frame
- """
- assert (
- self.scene is not None
- ), "A scene must be imported before running the simulator. Use EmptyScene for an empty scene."
-
- # Calculate time outside of step
- outside_step_dur = 0
- if self.frame_end_time is not None:
- outside_step_dur = time.perf_counter() - self.frame_end_time
- # Simulate Physics in PyBullet
- physics_start_time = time.perf_counter()
- for _ in range(self.physics_timestep_num):
- p.stepSimulation()
- physics_dur = time.perf_counter() - physics_start_time
-
- non_physics_start_time = time.perf_counter()
- self._non_physics_step()
- non_physics_dur = time.perf_counter() - non_physics_start_time
-
- # Sync PyBullet bodies to renderer and then render to Viewer
- render_start_time = time.perf_counter()
- self.sync()
- render_dur = time.perf_counter() - render_start_time
-
- # Sleep until last possible Vsync
- pre_sleep_dur = outside_step_dur + physics_dur + non_physics_dur + render_dur
- sleep_start_time = time.perf_counter()
- if pre_sleep_dur < self.non_block_frame_time:
- sleep(self.non_block_frame_time - pre_sleep_dur)
- sleep_dur = time.perf_counter() - sleep_start_time
-
- vr_system_dur = self.step_vr_system()
-
- # Calculate final frame duration
- # Make sure it is non-zero for FPS calculation (set to max of 1000 if so)
- frame_dur = max(1e-3, pre_sleep_dur + sleep_dur + vr_system_dur)
-
- # Set variables for data saving and replay
- self.last_physics_timestep = physics_dur
- self.last_render_timestep = render_dur
- self.last_frame_dur = frame_dur
-
- if print_stats:
- print("Frame number {} statistics (ms)".format(self.frame_count))
- print("Total out-of-step duration: {}".format(outside_step_dur * 1000))
- print("Total physics duration: {}".format(physics_dur * 1000))
- print("Total non-physics duration: {}".format(non_physics_dur * 1000))
- print("Total render duration: {}".format(render_dur * 1000))
- print("Total sleep duration: {}".format(sleep_dur * 1000))
- print("Total VR system duration: {}".format(vr_system_dur * 1000))
- print("Total frame duration: {} and fps: {}".format(frame_dur * 1000, 1 / frame_dur))
- print(
- "Realtime factor: {}".format(round((self.physics_timestep_num * self.physics_timestep) / frame_dur, 3))
- )
- print("-------------------------")
-
- self.frame_count += 1
- self.frame_end_time = time.perf_counter()
-
- def vr_system_update(self):
- """
- Updates the VR system for a single frame. This includes moving the vr offset,
- adjusting the user's height based on button input, and triggering haptics.
- """
- # Update VR offset using appropriate controller
- if self.vr_settings.touchpad_movement:
- vr_offset_device = "{}_controller".format(self.vr_settings.movement_controller)
- is_valid, _, _ = self.get_data_for_vr_device(vr_offset_device)
- if is_valid:
- _, touch_x, touch_y = self.get_button_data_for_controller(vr_offset_device)
- new_offset = calc_offset(
- self, touch_x, touch_y, self.vr_settings.movement_speed, self.vr_settings.relative_movement_device
- )
- self.set_vr_offset(new_offset)
-
- # Adjust user height based on y-axis (vertical direction) touchpad input
- vr_height_device = "left_controller" if self.vr_settings.movement_controller == "right" else "right_controller"
- is_height_valid, _, _ = self.get_data_for_vr_device(vr_height_device)
- if is_height_valid:
- curr_offset = self.get_vr_offset()
- hmd_height = self.get_hmd_world_pos()[2]
- _, _, height_y = self.get_button_data_for_controller(vr_height_device)
- if height_y < -0.7:
- vr_z_offset = -0.01
- if hmd_height + curr_offset[2] + vr_z_offset >= self.vr_settings.height_bounds[0]:
- self.set_vr_offset([curr_offset[0], curr_offset[1], curr_offset[2] + vr_z_offset])
- elif height_y > 0.7:
- vr_z_offset = 0.01
- if hmd_height + curr_offset[2] + vr_z_offset <= self.vr_settings.height_bounds[1]:
- self.set_vr_offset([curr_offset[0], curr_offset[1], curr_offset[2] + vr_z_offset])
-
- # Update haptics for body and hands
- if self.main_vr_robot:
- vr_body_id = self.main_vr_robot.links["body"].get_body_id()
- vr_hands = [
- ("left_controller", self.main_vr_robot.links["left_hand"]),
- ("right_controller", self.main_vr_robot.links["right_hand"]),
- ]
-
- # Check for body haptics
- wall_ids = [x.get_body_id() for x in self.scene.objects_by_category["walls"]]
- for c_info in p.getContactPoints(vr_body_id):
- if wall_ids and (c_info[1] in wall_ids or c_info[2] in wall_ids):
- for controller in ["left_controller", "right_controller"]:
- is_valid, _, _ = self.get_data_for_vr_device(controller)
- if is_valid:
- # Use 90% strength for body to warn user of collision with wall
- self.trigger_haptic_pulse(controller, 0.9)
-
- # Check for hand haptics
- for hand_device, hand_obj in vr_hands:
- is_valid, _, _ = self.get_data_for_vr_device(hand_device)
- if is_valid:
- if len(p.getContactPoints(hand_obj.get_body_id())) > 0 or (
- hasattr(hand_obj, "object_in_hand") and hand_obj.object_in_hand
- ):
- # Only use 30% strength for normal collisions, to help add realism to the experience
- self.trigger_haptic_pulse(hand_device, 0.3)
-
- def register_main_vr_robot(self, vr_robot):
- """
- Register the robot representing the VR user.
- """
- self.main_vr_robot = vr_robot
-
- def gen_vr_data(self):
- """
- Generates a VrData object containing all of the data required to describe the VR system in the current frame.
- This data is used to power the BehaviorRobot each frame.
- """
-
- v = dict()
- for device in VR_DEVICES:
- is_valid, trans, rot = self.get_data_for_vr_device(device)
- device_data = [is_valid, trans.tolist(), rot.tolist()]
- device_data.extend(self.get_device_coordinate_system(device))
- v[device] = device_data
- if device in VR_CONTROLLERS:
- v["{}_button".format(device)] = self.get_button_data_for_controller(device)
-
- # Store final rotations of hands, with model rotation applied
- for hand in ["right", "left"]:
- # Base rotation quaternion
- base_rot = self.main_vr_robot.links["{}_hand".format(hand)].base_rot
- # Raw rotation of controller
- controller_rot = v["{}_controller".format(hand)][2]
- # Use dummy translation to calculate final rotation
- final_rot = p.multiplyTransforms([0, 0, 0], controller_rot, [0, 0, 0], base_rot)[1]
- v["{}_controller".format(hand)].append(final_rot)
-
- is_valid, torso_trans, torso_rot = self.get_data_for_vr_tracker(self.vr_settings.torso_tracker_serial)
- v["torso_tracker"] = [is_valid, torso_trans, torso_rot]
- v["eye_data"] = self.get_eye_tracking_data()
- v["event_data"] = self.get_vr_events()
- reset_actions = []
- for controller in VR_CONTROLLERS:
- reset_actions.append(self.query_vr_event(controller, "reset_agent"))
- v["reset_actions"] = reset_actions
- v["vr_positions"] = [self.get_vr_pos().tolist(), list(self.get_vr_offset())]
-
- return VrData(v)
-
- def gen_vr_robot_action(self):
- """
- Generates an action for the BehaviorRobot to perform based on VrData collected this frame.
-
- Action space (all non-normalized values that will be clipped if they are too large)
- * See BehaviorRobot.py for details on the clipping thresholds for
- Body:
- - 6DOF pose delta - relative to body frame from previous frame
- Eye:
- - 6DOF pose delta - relative to body frame (where the body will be after applying this frame's action)
- Left hand, right hand (in that order):
- - 6DOF pose delta - relative to body frame (same as above)
- - Trigger fraction delta
- - Action reset value
-
- Total size: 28
- """
- # Actions are stored as 1D numpy array
- action = np.zeros((28,))
-
- # Get VrData for the current frame
- v = self.gen_vr_data()
-
- # Update body action space
- hmd_is_valid, hmd_pos, hmd_orn, hmd_r = v.query("hmd")[:4]
- torso_is_valid, torso_pos, torso_orn = v.query("torso_tracker")
- vr_body = self.main_vr_robot.links["body"]
- prev_body_pos, prev_body_orn = vr_body.get_position_orientation()
- inv_prev_body_pos, inv_prev_body_orn = p.invertTransform(prev_body_pos, prev_body_orn)
-
- if self.vr_settings.using_tracked_body:
- if torso_is_valid:
- des_body_pos, des_body_orn = torso_pos, torso_orn
- else:
- des_body_pos, des_body_orn = prev_body_pos, prev_body_orn
- else:
- if hmd_is_valid:
- des_body_pos, des_body_orn = hmd_pos, p.getQuaternionFromEuler([0, 0, calc_z_rot_from_right(hmd_r)])
- else:
- des_body_pos, des_body_orn = prev_body_pos, prev_body_orn
-
- body_delta_pos, body_delta_orn = p.multiplyTransforms(
- inv_prev_body_pos, inv_prev_body_orn, des_body_pos, des_body_orn
- )
- action[:3] = np.array(body_delta_pos)
- action[3:6] = np.array(p.getEulerFromQuaternion(body_delta_orn))
-
- # Get new body position so we can calculate correct relative transforms for other VR objects
- clipped_body_delta_pos, clipped_body_delta_orn = vr_body.clip_delta_pos_orn(action[:3], action[3:6])
- clipped_body_delta_orn = p.getQuaternionFromEuler(clipped_body_delta_orn)
- new_body_pos, new_body_orn = p.multiplyTransforms(
- prev_body_pos, prev_body_orn, clipped_body_delta_pos, clipped_body_delta_orn
- )
- # Also calculate its inverse for further local transform calculations
- inv_new_body_pos, inv_new_body_orn = p.invertTransform(new_body_pos, new_body_orn)
-
- # Update action space for other VR objects
- body_relative_parts = ["right", "left", "eye"]
- for part_name in body_relative_parts:
- vr_part = (
- self.main_vr_robot.links[part_name]
- if part_name == "eye"
- else self.main_vr_robot.links["{}_hand".format(part_name)]
- )
-
- # Process local transform adjustments
- prev_world_pos, prev_world_orn = vr_part.get_position_orientation()
- prev_local_pos, prev_local_orn = vr_part.get_local_position_orientation()
- _, inv_prev_local_orn = p.invertTransform(prev_local_pos, prev_local_orn)
- if part_name == "eye":
- valid, world_pos, world_orn = hmd_is_valid, hmd_pos, hmd_orn
- else:
- valid, world_pos, _ = v.query("{}_controller".format(part_name))[:3]
- # Need rotation of the model so it will appear aligned with the physical controller in VR
- world_orn = v.query("{}_controller".format(part_name))[6]
-
- # Keep in same world position as last frame if controller/tracker data is not valid
- if not valid:
- world_pos, world_orn = prev_world_pos, prev_world_orn
-
- # Get desired local position and orientation transforms
- des_local_pos, des_local_orn = p.multiplyTransforms(
- inv_new_body_pos, inv_new_body_orn, world_pos, world_orn
- )
-
- # Get the delta local orientation in the reference frame of the body
- _, delta_local_orn = p.multiplyTransforms(
- [0, 0, 0],
- des_local_orn,
- [0, 0, 0],
- inv_prev_local_orn,
- )
- delta_local_orn = p.getEulerFromQuaternion(delta_local_orn)
-
- # Get the delta local position in the reference frame of the body
- delta_local_pos = np.array(des_local_pos) - np.array(prev_local_pos)
-
- if part_name == "eye":
- action[6:9] = np.array(delta_local_pos)
- action[9:12] = np.array(delta_local_orn)
- elif part_name == "left":
- action[12:15] = np.array(delta_local_pos)
- action[15:18] = np.array(delta_local_orn)
- else:
- action[20:23] = np.array(delta_local_pos)
- action[23:26] = np.array(delta_local_orn)
-
- # Process trigger fraction and reset for controllers
- if part_name in ["right", "left"]:
- prev_trig_frac = vr_part.trigger_fraction
- if valid:
- trig_frac = v.query("{}_controller_button".format(part_name))[0]
- delta_trig_frac = trig_frac - prev_trig_frac
- else:
- delta_trig_frac = 0.0
- if part_name == "left":
- action[18] = delta_trig_frac
- else:
- action[26] = delta_trig_frac
- # If we reset, action is 1, otherwise 0
- reset_action = v.query("reset_actions")[0] if part_name == "left" else v.query("reset_actions")[1]
- reset_action_val = 1.0 if reset_action else 0.0
- if part_name == "left":
- action[19] = reset_action_val
- else:
- action[27] = reset_action_val
-
- return action
-
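A hedged summary of the 28-dimensional action assembled above, with index ranges inferred from the slicing in this method:

action = sim.gen_vr_robot_action()   # shape (28,)
#  0:3   body position delta        3:6   body orientation delta (euler)
#  6:9   eye position delta         9:12  eye orientation delta
# 12:15  left-hand position delta   15:18 left-hand orientation delta
# 18     left trigger delta         19    left reset flag
# 20:23  right-hand position delta  23:26 right-hand orientation delta
# 26     right trigger delta        27    right reset flag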
- def sync_vr_compositor(self):
- """
- Sync VR compositor.
- """
- self.renderer.vr_compositor_update()
-
- def perform_vr_start_pos_move(self):
- """
- Sets the VR position on the first step iteration where the hmd tracking is valid. Not to be confused
- with self.set_vr_start_pos, which simply records the desired start position before the simulator starts running.
- """
- # Update VR start position if it is not None and the hmd is valid
- # This will keep checking until we can successfully set the start position
- if self.vr_start_pos:
- hmd_is_valid, _, _, _ = self.renderer.vrsys.getDataForVRDevice("hmd")
- if hmd_is_valid:
- offset_to_start = np.array(self.vr_start_pos) - self.get_hmd_world_pos()
- if self.vr_height_offset is not None:
- offset_to_start[2] = self.vr_height_offset
- self.set_vr_offset(offset_to_start)
- self.vr_start_pos = None
-
- def fix_eye_tracking_value(self):
- """
- Calculates and fixes eye tracking data to its value during step(). This is necessary, since multiple
- calls to get eye tracking data return different results, due to the SRAnipal multithreaded loop that
- runs in parallel to the iGibson main thread
- """
- self.eye_tracking_data = self.renderer.vrsys.getEyeTrackingData()
-
- def poll_vr_events(self):
- """
- Returns VR event data as list of lists.
- List is empty if all events are invalid. Components of a single event:
- controller: 0 (left_controller), 1 (right_controller)
- button_idx: any valid idx in EVRButtonId enum in openvr.h header file
- press: 0 (unpress), 1 (press)
- """
-
- self.vr_event_data = self.renderer.vrsys.pollVREvents()
- # Enforce store_first_button_press_per_frame option, if user has enabled it
- if self.vr_settings.store_only_first_event_per_button:
- temp_event_data = []
- # Make sure we only store the first (button, press) combo of each type
- event_set = set()
- for ev_data in self.vr_event_data:
- controller, button_idx, _ = ev_data
- key = (controller, button_idx)
- if key not in event_set:
- temp_event_data.append(ev_data)
- event_set.add(key)
- self.vr_event_data = temp_event_data[:]
-
- return self.vr_event_data
-
- def get_vr_events(self):
- """
- Returns the VR events processed by the simulator
- """
- return self.vr_event_data
-
- def get_button_for_action(self, action):
- """
- Returns (button, state) tuple corresponding to an action
- :param action: an action name listed in "action_button_map" dictionary for the current device in the vr_config.yml
- """
- return (
- None
- if action not in self.vr_settings.action_button_map
- else tuple(self.vr_settings.action_button_map[action])
- )
-
- def query_vr_event(self, controller, action):
- """
- Queries system for a VR event, and returns true if that event happened this frame
- :param controller: device to query for - can be left_controller or right_controller
- :param action: an action name listed in "action_button_map" dictionary for the current device in the vr_config.yml
- """
- # Return false if any of input parameters are invalid
- if (
- controller not in ["left_controller", "right_controller"]
- or action not in self.vr_settings.action_button_map.keys()
- ):
- return False
-
- # Search through event list to try to find desired event
- controller_id = 0 if controller == "left_controller" else 1
- button_idx, press_id = self.vr_settings.action_button_map[action]
- for ev_data in self.vr_event_data:
- if controller_id == ev_data[0] and button_idx == ev_data[1] and press_id == ev_data[2]:
- return True
-
- # Return false if event was not found this frame
- return False
-
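A hedged usage sketch of the event API above ("reset_agent" is an action name that appears in gen_vr_data earlier in this file):

# Hedged example - poll_vr_events() runs once per frame inside step(); query afterwards.
if sim.query_vr_event("right_controller", "reset_agent"):
    print("User requested an agent reset this frame")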
- def get_data_for_vr_device(self, device_name):
- """
- Call this after step - returns all VR device data for a specific device
- Returns is_valid (indicating validity of data), translation and rotation in Gibson world space
- :param device_name: can be hmd, left_controller or right_controller
- """
-
- # Use fourth variable in list to get actual hmd position in space
- is_valid, translation, rotation, _ = self.renderer.vrsys.getDataForVRDevice(device_name)
- if not is_valid:
- translation = np.array([0, 0, 0])
- rotation = np.array([0, 0, 0, 1])
- return [is_valid, translation, rotation]
-
- def get_data_for_vr_tracker(self, tracker_serial_number):
- """
- Returns the data for a tracker with a specific serial number. This number can be found
- by looking in the SteamVR device information.
- :param tracker_serial_number: the serial number of the tracker
- """
-
- if not tracker_serial_number:
- return [False, [0, 0, 0], [0, 0, 0, 0]]
-
- tracker_data = self.renderer.vrsys.getDataForVRTracker(tracker_serial_number)
- # Set is_valid to false, and assume the user will check for invalid data
- if not tracker_data:
- return [False, np.array([0, 0, 0]), np.array([0, 0, 0, 1])]
-
- is_valid, translation, rotation = tracker_data
- return [is_valid, translation, rotation]
-
- def get_hmd_world_pos(self):
- """
- Get world position of HMD without offset
- """
-
- _, _, _, hmd_world_pos = self.renderer.vrsys.getDataForVRDevice("hmd")
- return hmd_world_pos
-
- def get_button_data_for_controller(self, controller_name):
- """
- Call this after getDataForVRDevice - returns analog data for a specific controller
- Returns trigger_fraction, touchpad finger position x, touchpad finger position y
- Data is only valid if isValid is true from previous call to getDataForVRDevice
- Trigger data: 1 (closed) <------> 0 (open)
- Analog data: X: -1 (left) <-----> 1 (right) and Y: -1 (bottom) <------> 1 (top)
- :param controller_name: one of left_controller or right_controller
- """
-
- # Test for validity when acquiring button data
- if self.get_data_for_vr_device(controller_name)[0]:
- trigger_fraction, touch_x, touch_y = self.renderer.vrsys.getButtonDataForController(controller_name)
- else:
- trigger_fraction, touch_x, touch_y = 0.0, 0.0, 0.0
- return [trigger_fraction, touch_x, touch_y]
-
- def get_scroll_input(self):
- """
- Gets scroll input. This uses the non-movement-controller, and determines whether
- the user wants to scroll by testing if they have pressed the touchpad, while keeping
- their finger on the left/right of the pad. Returns 1 for up, 0 for down, and -1 for no scroll
- """
- mov_controller = self.vr_settings.movement_controller
- other_controller = "right" if mov_controller == "left" else "left"
- other_controller = "{}_controller".format(other_controller)
- # Data indicating whether user has pressed top or bottom of the touchpad
- _, touch_x, _ = self.renderer.vrsys.getButtonDataForController(other_controller)
- # Detect no touch in extreme regions of x axis
- if touch_x > 0.7 and touch_x <= 1.0:
- return 1
- elif touch_x < -0.7 and touch_x >= -1.0:
- return 0
- else:
- return -1
-
- def get_eye_tracking_data(self):
- """
- Returns eye tracking data as list of lists. Order: is_valid, gaze origin, gaze direction, gaze point,
- left pupil diameter, right pupil diameter (both in millimeters)
- Call after getDataForVRDevice, to guarantee that latest HMD transform has been acquired
- """
- is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter = self.eye_tracking_data
- # Set other values to 0 to avoid very small/large floating point numbers
- if not is_valid:
- return [False, [0, 0, 0], [0, 0, 0], 0, 0]
- else:
- return [is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter]
-
- def set_vr_start_pos(self, start_pos=None, vr_height_offset=None):
- """
- Sets the starting position of the VR system in iGibson space
- :param start_pos: position to start VR system at
- :param vr_height_offset: starting height offset. If None, uses absolute height from start_pos
- """
-
- # The VR headset will actually be set to this position during the first frame.
- # This is because we need to know where the headset is in space when it is first picked
- # up to set the initial offset correctly.
- self.vr_start_pos = start_pos
- # This value can be set to specify a height offset instead of an absolute height.
- # We might want to adjust the height of the camera based on the height of the person using VR,
- # but still offset this height. When this option is not None it offsets the height by the amount
- # specified instead of overwriting the VR system height output.
- self.vr_height_offset = vr_height_offset
-
- def set_vr_pos(self, pos=None, keep_height=False):
- """
- Sets the world position of the VR system in iGibson space
- :param pos: position to set VR system to
- :param keep_height: whether the current VR height should be kept
- """
-
- offset_to_pos = np.array(pos) - self.get_hmd_world_pos()
- if keep_height:
- curr_offset_z = self.get_vr_offset()[2]
- self.set_vr_offset([offset_to_pos[0], offset_to_pos[1], curr_offset_z])
- else:
- self.set_vr_offset(offset_to_pos)
-
- def get_vr_pos(self):
- """
- Gets the world position of the VR system in iGibson space.
- """
- return self.get_hmd_world_pos() + np.array(self.get_vr_offset())
-
- def set_vr_offset(self, pos=None):
- """
- Sets the translational offset of the VR system (HMD, left controller, right controller) from world space coordinates.
- Can be used for many things, including adjusting height and teleportation-based movement
- :param pos: must be a list of three floats, corresponding to x, y, z in Gibson coordinate space
- """
-
- self.renderer.vrsys.setVROffset(-pos[1], pos[2], -pos[0])
-
- def get_vr_offset(self):
- """
- Gets the current VR offset vector in list form: x, y, z (in iGibson coordinates)
- """
-
- x, y, z = self.renderer.vrsys.getVROffset()
- return [x, y, z]
-
- def get_device_coordinate_system(self, device):
- """
- Gets the direction vectors representing the device's coordinate system in list form: x, y, z (in Gibson coordinates)
- List contains "right", "up" and "forward" vectors in that order
- :param device: can be one of "hmd", "left_controller" or "right_controller"
- """
-
- vec_list = []
-
- coordinate_sys = self.renderer.vrsys.getDeviceCoordinateSystem(device)
- for dir_vec in coordinate_sys:
- vec_list.append(dir_vec)
-
- return vec_list
-
- def trigger_haptic_pulse(self, device, strength):
- """
- Triggers a haptic pulse of the specified strength (0 is weakest, 1 is strongest)
- :param device: device to trigger haptic for - can be any one of [left_controller, right_controller]
- :param strength: strength of haptic pulse (0 is weakest, 1 is strongest)
- """
- assert device in ["left_controller", "right_controller"]
-
- self.renderer.vrsys.triggerHapticPulseForDevice(device, int(self.max_haptic_duration * strength))
diff --git a/igibson/tasks/behavior_task.py b/igibson/tasks/behavior_task.py
deleted file mode 100644
index 2879d0c22..000000000
--- a/igibson/tasks/behavior_task.py
+++ /dev/null
@@ -1,949 +0,0 @@
-import datetime
-import logging
-from collections import OrderedDict
-
-import networkx as nx
-import pybullet as p
-from bddl.activity import (
- Conditions,
- evaluate_goal_conditions,
- get_goal_conditions,
- get_ground_goal_state_options,
- get_initial_conditions,
- get_natural_goal_conditions,
- get_object_scope,
-)
-from bddl.condition_evaluation import Negation
-from bddl.logic_base import AtomicFormula
-from bddl.object_taxonomy import ObjectTaxonomy
-
-import igibson
-from igibson.external.pybullet_tools.utils import *
-from igibson.object_states.on_floor import RoomFloor
-from igibson.objects.articulated_object import URDFObject
-from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer
-from igibson.reward_functions.potential_reward import PotentialReward
-from igibson.robots.behavior_robot import BehaviorRobot
-from igibson.robots.fetch import Fetch
-from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.simulator import Simulator
-from igibson.tasks.bddl_backend import IGibsonBDDLBackend
-from igibson.tasks.task_base import BaseTask
-from igibson.termination_conditions.predicate_goal import PredicateGoal
-from igibson.termination_conditions.timeout import Timeout
-from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path
-from igibson.utils.checkpoint_utils import load_checkpoint, load_internal_states, save_internal_states
-from igibson.utils.constants import (
- AGENT_POSE_DIM,
- FLOOR_SYNSET,
- MAX_TASK_RELEVANT_OBJS,
- NON_SAMPLEABLE_OBJECTS,
- TASK_RELEVANT_OBJS_OBS_DIM,
- SimulatorMode,
-)
-from igibson.utils.ig_logging import IGLogWriter
-from igibson.utils.utils import quatXYZWFromRotMat, restoreState
-
-KINEMATICS_STATES = frozenset({"inside", "ontop", "under", "onfloor"})
-
-
-class BehaviorTask(BaseTask):
- def __init__(self, env):
- super(BehaviorTask, self).__init__(env)
- self.scene = env.scene
-
- self.termination_conditions = [
- Timeout(self.config),
- PredicateGoal(self.config),
- ]
- self.reward_functions = [
- PotentialReward(self.config),
- ]
-
- self.object_taxonomy = ObjectTaxonomy()
- self.backend = IGibsonBDDLBackend()
- self.update_problem(
- behavior_activity=self.config.get("task", None),
- activity_definition=self.config.get("task_id", 0),
- predefined_problem=self.config.get("predefined_problem", None),
- )
- self.load_clutter = self.config.get("load_clutter", True)
- self.debug_obj_inst = self.config.get("debug_obj_inst", None)
- self.online_sampling = self.config.get("online_sampling", False)
- self.reset_checkpoint_idx = self.config.get("reset_checkpoint_idx", -1)
- self.reset_checkpoint_dir = self.config.get("reset_checkpoint_dir", None)
- self.task_obs_dim = MAX_TASK_RELEVANT_OBJS * TASK_RELEVANT_OBJS_OBS_DIM + AGENT_POSE_DIM
-
- self.initialized, self.feedback = self.initialize(env)
- self.state_history = {}
- self.initial_state = self.save_scene(env)
- if self.config.get("should_highlight_task_relevant_objs", True):
- self.highlight_task_relevant_objs(env)
- if self.config.get("should_activate_behavior_robot", True):
- env.robots[0].activate()
-
- self.episode_save_dir = self.config.get("episode_save_dir", None)
- if self.episode_save_dir is not None:
- os.makedirs(self.episode_save_dir, exist_ok=True)
- self.log_writer = None
-
- def highlight_task_relevant_objs(self, env):
- for _, obj in self.object_scope.items():
- if obj.category in ["agent", "room_floor"]:
- continue
- obj.highlight()
-
- def update_problem(self, behavior_activity, activity_definition, predefined_problem=None):
- self.behavior_activity = behavior_activity
- self.activity_definition = activity_definition
- self.conds = Conditions(
- behavior_activity, activity_definition, simulator_name="igibson", predefined_problem=predefined_problem
- )
- self.object_scope = get_object_scope(self.conds)
- self.obj_inst_to_obj_cat = {
- obj_inst: obj_cat
- for obj_cat in self.conds.parsed_objects
- for obj_inst in self.conds.parsed_objects[obj_cat]
- }
-
- # Generate initial and goal conditions
- self.initial_conditions = get_initial_conditions(self.conds, self.backend, self.object_scope)
- self.goal_conditions = get_goal_conditions(self.conds, self.backend, self.object_scope)
- self.ground_goal_state_options = get_ground_goal_state_options(
- self.conds, self.backend, self.object_scope, self.goal_conditions
- )
-
- # Demo attributes
- self.instruction_order = np.arange(len(self.conds.parsed_goal_conditions))
- np.random.shuffle(self.instruction_order)
- self.currently_viewed_index = 0
- self.currently_viewed_instruction = self.instruction_order[self.currently_viewed_index]
- self.current_success = False
- self.current_goal_status = {"satisfied": [], "unsatisfied": []}
- self.natural_language_goal_conditions = get_natural_goal_conditions(self.conds)
-
- def get_potential(self, env):
- # Evaluate the first ground goal state option as the potential
- _, satisfied_predicates = evaluate_goal_conditions(self.ground_goal_state_options[0])
- success_score = len(satisfied_predicates["satisfied"]) / (
- len(satisfied_predicates["satisfied"]) + len(satisfied_predicates["unsatisfied"])
- )
- return -success_score
-
- def save_scene(self, env):
- snapshot_id = p.saveState()
- self.state_history[snapshot_id] = save_internal_states(env.simulator)
- return snapshot_id
-
- def reset_scene(self, env):
- if self.reset_checkpoint_dir is not None and self.reset_checkpoint_idx != -1:
- load_checkpoint(env.simulator, self.reset_checkpoint_dir, self.reset_checkpoint_idx)
- else:
- restoreState(self.initial_state)
- load_internal_states(env.simulator, self.state_history[self.initial_state])
-
- def reset_agent(self, env):
- return
-
- def reset_variables(self, env):
- if self.log_writer is not None:
- self.log_writer.end_log_session()
- del self.log_writer
- self.log_writer = None
-
- if self.episode_save_dir:
- timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
- task = self.config["task"]
- task_id = self.config["task_id"]
- scene = self.config["scene_id"]
- vr_log_path = os.path.join(
- self.episode_save_dir,
- "{}_{}_{}_{}_{}.hdf5".format(
- self.behavior_activity,
- self.activity_definition,
- env.scene.scene_id,
- timestamp,
- env.current_episode,
- ),
- )
- self.log_writer = IGLogWriter(
- env.simulator,
- frames_before_write=200,
- log_filepath=vr_log_path,
- task=self,
- store_vr=False,
- vr_robot=env.robots[0],
- filter_objects=True,
- )
- self.log_writer.set_up_data_storage()
-
- def step(self, env):
- if self.log_writer is not None:
- self.log_writer.process_frame()
-
- def initialize(self, env):
- accept_scene = True
- feedback = None
-
- if self.online_sampling:
- # Reject scenes with missing non-sampleable objects
- # Populate object_scope with sampleable objects and the robot
- accept_scene, feedback = self.check_scene(env)
- if not accept_scene:
- return accept_scene, feedback
- # Sample objects to satisfy initial conditions
- accept_scene, feedback = self.sample(env)
- if not accept_scene:
- return accept_scene, feedback
-
- if self.load_clutter:
- # Add clutter objects into the scenes
- self.clutter_scene(env)
- else:
- # Load existing scene cache and assign object scope accordingly
- self.assign_object_scope_with_cache(env)
-
- # Generate goal condition with the fully populated self.object_scope
- self.goal_conditions = get_goal_conditions(self.conds, self.backend, self.object_scope)
- self.ground_goal_state_options = get_ground_goal_state_options(
- self.conds, self.backend, self.object_scope, self.goal_conditions
- )
- return accept_scene, feedback
-
- def parse_non_sampleable_object_room_assignment(self):
- self.room_type_to_obj_inst = {}
- self.non_sampleable_object_inst = set()
- for cond in self.conds.parsed_initial_conditions:
- if cond[0] == "inroom":
- obj_inst, room_type = cond[1], cond[2]
- obj_cat = self.obj_inst_to_obj_cat[obj_inst]
- if obj_cat not in NON_SAMPLEABLE_OBJECTS:
- # Invalid room assignment
- return "You have assigned room type for [{}], but [{}] is sampleable. Only non-sampleable objects can have room assignment.".format(
- obj_cat, obj_cat
- )
- if room_type not in self.scene.room_sem_name_to_ins_name:
- # Missing room type
- return "Room type [{}] missing in scene [{}].".format(room_type, self.scene.scene_id)
- if room_type not in self.room_type_to_obj_inst:
- self.room_type_to_obj_inst[room_type] = []
- self.room_type_to_obj_inst[room_type].append(obj_inst)
-
- if obj_inst in self.non_sampleable_object_inst:
- # Duplicate room assignment
- return "Object [{}] has more than one room assignment".format(obj_inst)
-
- self.non_sampleable_object_inst.add(obj_inst)
-
- for obj_cat in self.conds.parsed_objects:
- if obj_cat not in NON_SAMPLEABLE_OBJECTS:
- continue
- for obj_inst in self.conds.parsed_objects[obj_cat]:
- if obj_inst not in self.non_sampleable_object_inst:
- # Missing room assignment
- return "All non-sampleable objects should have room assignment. [{}] does not have one.".format(
- obj_inst
- )
-
- def build_sampling_order(self):
- """
- Sampling orders is a list of lists: [[batch_1_inst_1, ... batch_1_inst_N], [batch_2_inst_1, batch_2_inst_M], ...]
- Sampling should happen for batch 1 first, then batch 2, so on and so forth
- Example: OnTop(plate, table) should belong to batch 1, and OnTop(apple, plate) should belong to batch 2
- """
- self.sampling_orders = []
- cur_batch = self.non_sampleable_object_inst
- while len(cur_batch) > 0:
- self.sampling_orders.append(cur_batch)
- next_batch = set()
- for cond in self.conds.parsed_initial_conditions:
- if len(cond) == 3 and cond[2] in cur_batch:
- next_batch.add(cond[1])
- cur_batch = next_batch
-
- if len(self.sampling_orders) > 0:
- remaining_objs = self.object_scope.keys() - set.union(*self.sampling_orders)
- else:
- remaining_objs = self.object_scope.keys()
-
- if len(remaining_objs) != 0:
- return "Some objects do not have any kinematic condition defined for them in the initial conditions: {}".format(
- ", ".join(remaining_objs)
- )
-
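Using the OnTop(plate, table) / OnTop(apple, plate) example from the docstring above, the resulting batches would look as follows (a hedged illustration with made-up instance names):

# Hedged illustration of self.sampling_orders for that example:
# batch 1: {"table.n.02_1"}  - non-sampleable, placed via an "inroom" condition
# batch 2: {"plate.n.04_1"}  - sampled on top of the table
# batch 3: {"apple.n.01_1"}  - sampled on top of the plate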
- def build_non_sampleable_object_scope(self):
- """
- Store simulator object options for non-sampleable objects in self.non_sampleable_object_scope
- {
- "table1": {
- "living_room_0": [URDFObject, URDFObject, URDFObject],
- "living_room_1": [URDFObject]
- },
- "table2": {
- "living_room_0": [URDFObject, URDFObject],
- "living_room_1": [URDFObject, URDFObject]
- },
- "chair1": {
- "living_room_0": [URDFObject],
- "living_room_1": [URDFObject]
- },
- }
- """
- room_type_to_scene_objs = {}
- for room_type in self.room_type_to_obj_inst:
- room_type_to_scene_objs[room_type] = {}
- for obj_inst in self.room_type_to_obj_inst[room_type]:
- room_type_to_scene_objs[room_type][obj_inst] = {}
- obj_cat = self.obj_inst_to_obj_cat[obj_inst]
-
- # We allow burners to be used as if they are stoves
- categories = self.object_taxonomy.get_subtree_igibson_categories(obj_cat)
- if obj_cat == "stove.n.01":
- categories += self.object_taxonomy.get_subtree_igibson_categories("burner.n.02")
-
- for room_inst in self.scene.room_sem_name_to_ins_name[room_type]:
- if obj_cat == FLOOR_SYNSET:
- # TODO: remove after split floors
- # Create a RoomFloor for each room instance
- # This object is NOT imported by the simulator
- room_floor = RoomFloor(
- category="room_floor",
- name="room_floor_{}".format(room_inst),
- scene=self.scene,
- room_instance=room_inst,
- floor_obj=self.scene.objects_by_name["floors"],
- )
- scene_objs = [room_floor]
- else:
- room_objs = []
- if room_inst in self.scene.objects_by_room:
- room_objs = self.scene.objects_by_room[room_inst]
- # A list of scene objects that satisfy the requested categories
- scene_objs = [obj for obj in room_objs if obj.category in categories]
-
- if len(scene_objs) != 0:
- room_type_to_scene_objs[room_type][obj_inst][room_inst] = scene_objs
-
- error_msg = self.consolidate_room_instance(room_type_to_scene_objs, "initial_pre-sampling")
- if error_msg:
- return error_msg
- self.non_sampleable_object_scope = room_type_to_scene_objs
-
- def import_sampleable_objects(self, env):
- self.newly_added_objects = set()
- num_new_obj = 0
- # Only populate self.object_scope for sampleable objects
- avg_category_spec = get_ig_avg_category_specs()
- for obj_cat in self.conds.parsed_objects:
- if obj_cat == "agent.n.01":
- continue
- if obj_cat in NON_SAMPLEABLE_OBJECTS:
- continue
- is_sliceable = self.object_taxonomy.has_ability(obj_cat, "sliceable")
- categories = self.object_taxonomy.get_subtree_igibson_categories(obj_cat)
-
- # TODO: temporary hack
- remove_categories = [
- "pop_case", # too large
- "jewel", # too small
- "ring", # too small
- ]
- for remove_category in remove_categories:
- if remove_category in categories:
- categories.remove(remove_category)
-
- # for sliceable objects, exclude the half_XYZ categories
- if is_sliceable:
- categories = [cat for cat in categories if "half_" not in cat]
-
- for obj_inst in self.conds.parsed_objects[obj_cat]:
- category = np.random.choice(categories)
- category_path = get_ig_category_path(category)
- model_choices = os.listdir(category_path)
-
- # Filter object models if the object category is openable
- synset = self.object_taxonomy.get_class_name_from_igibson_category(category)
- if self.object_taxonomy.has_ability(synset, "openable"):
- # Always use the articulated version of a certain object if its category is openable
- # E.g. backpack, jar, etc
- model_choices = [m for m in model_choices if "articulated_" in m]
- if len(model_choices) == 0:
- return "{} is Openable, but does not have articulated models.".format(category)
-
- # Randomly select an object model
- model = np.random.choice(model_choices)
-
- # TODO: temporary hack
- # for "collecting aluminum cans", we need pop cans (not bottles)
- if category == "pop" and self.behavior_activity in ["collecting_aluminum_cans"]:
- model = np.random.choice([str(i) for i in range(40, 46)])
- if category == "spoon" and self.behavior_activity in ["polishing_silver"]:
- model = np.random.choice([str(i) for i in [2, 5, 6]])
-
- model_path = get_ig_model_path(category, model)
- filename = os.path.join(model_path, model + ".urdf")
- obj_name = "{}_{}".format(category, len(self.scene.objects_by_name))
- # create the object and set the initial position to be far-away
- simulator_obj = URDFObject(
- filename,
- name=obj_name,
- category=category,
- model_path=model_path,
- avg_obj_dims=avg_category_spec.get(category),
- fit_avg_dim_volume=True,
- texture_randomization=False,
- overwrite_inertial=True,
- initial_pos=[100 + num_new_obj, 100, -100],
- )
- num_new_obj += 1
-
- if is_sliceable:
- whole_object = simulator_obj
- object_parts = []
- assert "object_parts" in simulator_obj.metadata, "object_parts not found in metadata: [{}]".format(
- model_path
- )
-
- for i, part in enumerate(simulator_obj.metadata["object_parts"]):
- category = part["category"]
- model = part["model"]
- # Scale the offset accordingly
- part_pos = part["pos"] * whole_object.scale
- part_orn = part["orn"]
- model_path = get_ig_model_path(category, model)
- filename = os.path.join(model_path, model + ".urdf")
- obj_name = whole_object.name + "_part_{}".format(i)
- # create the object part (or half object) and set the initial position to be far-away
- simulator_obj_part = URDFObject(
- filename,
- name=obj_name,
- category=category,
- model_path=model_path,
- avg_obj_dims=avg_category_spec.get(category),
- fit_avg_dim_volume=False,
- scale=whole_object.scale,
- texture_randomization=False,
- overwrite_inertial=True,
- initial_pos=[100 + num_new_obj, 100, -100],
- )
- num_new_obj += 1
- object_parts.append((simulator_obj_part, (part_pos, part_orn)))
-
- assert len(object_parts) > 0
- grouped_obj_parts = ObjectGrouper(object_parts)
- simulator_obj = ObjectMultiplexer(
- whole_object.name + "_multiplexer", [whole_object, grouped_obj_parts], 0
- )
-
- # Load the object into the simulator
- if not self.scene.loaded:
- self.scene.add_object(simulator_obj, simulator=None)
- else:
- env.simulator.import_object(simulator_obj)
- self.newly_added_objects.add(simulator_obj)
- self.object_scope[obj_inst] = simulator_obj
-
- def check_scene(self, env):
- error_msg = self.parse_non_sampleable_object_room_assignment()
- if error_msg:
- logging.warning(error_msg)
- return False, error_msg
-
- error_msg = self.build_sampling_order()
- if error_msg:
- logging.warning(error_msg)
- return False, error_msg
-
- error_msg = self.build_non_sampleable_object_scope()
- if error_msg:
- logging.warning(error_msg)
- return False, error_msg
-
- error_msg = self.import_sampleable_objects(env)
- if error_msg:
- logging.warning(error_msg)
- return False, error_msg
-
- self.object_scope["agent.n.01_1"] = self.get_agent(env)
-
- return True, None
-
- def get_agent(self, env):
- if isinstance(env.robots[0], BehaviorRobot):
- return env.robots[0].links["body"]
- else:
- return env.robots[0]
-
- def assign_object_scope_with_cache(self, env):
- # Assign object_scope based on a cached scene
- for obj_inst in self.object_scope:
- matched_sim_obj = None
- # TODO: remove after split floors
- if "floor.n.01" in obj_inst:
- floor_obj = self.scene.objects_by_name["floors"]
- bddl_object_scope = floor_obj.bddl_object_scope.split(",")
- bddl_object_scope = {item.split(":")[0]: item.split(":")[1] for item in bddl_object_scope}
- assert obj_inst in bddl_object_scope
- room_inst = bddl_object_scope[obj_inst].replace("room_floor_", "")
- matched_sim_obj = RoomFloor(
- category="room_floor",
- name=bddl_object_scope[obj_inst],
- scene=self.scene,
- room_instance=room_inst,
- floor_obj=floor_obj,
- )
- elif obj_inst == "agent.n.01_1":
- matched_sim_obj = self.get_agent(env)
- agent = matched_sim_obj
- if isinstance(env.robots[0], BehaviorRobot):
- agent = matched_sim_obj.parent
-
- if isinstance(env.robots[0], BehaviorRobot):
- agent_key = "BRBody_1"
- elif isinstance(env.robots[0], Fetch):
- agent_key = "fetch_gripper_robot_1"
- else:
- raise Exception("Only BehaviorRobot and Fetch have scene caches")
-
- agent.set_position_orientation(
- self.scene.agent[agent_key]["xyz"],
- quat_from_euler(self.scene.agent[agent_key]["rpy"]),
- )
- else:
- for _, sim_obj in self.scene.objects_by_name.items():
- if sim_obj.bddl_object_scope == obj_inst:
- matched_sim_obj = sim_obj
- break
- assert matched_sim_obj is not None, obj_inst
- self.object_scope[obj_inst] = matched_sim_obj
-
- def process_single_condition(self, condition):
- if not isinstance(condition.children[0], Negation) and not isinstance(condition.children[0], AtomicFormula):
- logging.warning(("Skipping over sampling of predicate that is not a negation or an atomic formula"))
- return None, None
-
- if isinstance(condition.children[0], Negation):
- condition = condition.children[0].children[0]
- positive = False
- else:
- condition = condition.children[0]
- positive = True
-
- return condition, positive
-
- def group_initial_conditions(self):
- self.non_sampleable_obj_conditions = []
- self.sampleable_obj_conditions = []
-
- # TODO: currently we assume self.initial_conditions is a list of
- # bddl.condition_evaluation.HEAD, each with one child.
- # This child is either an ObjectStateUnaryPredicate/ObjectStateBinaryPredicate or
- # a Negation of a ObjectStateUnaryPredicate/ObjectStateBinaryPredicate
- for condition in self.initial_conditions:
- condition, positive = self.process_single_condition(condition)
- if condition is None:
- continue
-
- if condition.STATE_NAME in KINEMATICS_STATES and not positive:
- return "Initial condition has negative kinematic conditions: {}".format(condition.body)
-
- condition_body = set(condition.body)
- if len(self.non_sampleable_object_inst.intersection(condition_body)) > 0:
- self.non_sampleable_obj_conditions.append((condition, positive))
- else:
- self.sampleable_obj_conditions.append((condition, positive))
-
- def filter_object_scope(self, input_object_scope, conditions, condition_type):
- filtered_object_scope = {}
- for room_type in input_object_scope:
- filtered_object_scope[room_type] = {}
- for scene_obj in input_object_scope[room_type]:
- filtered_object_scope[room_type][scene_obj] = {}
- for room_inst in input_object_scope[room_type][scene_obj]:
- # These are a list of candidate simulator objects that need sampling test
- for obj in input_object_scope[room_type][scene_obj][room_inst]:
- # Temporarily set object_scope to point to this candidate object
- self.object_scope[scene_obj] = obj
-
- success = True
- # If this candidate object is not involved in any conditions,
- # success will be True by default and this object will qualify
- for condition, positive in conditions:
- # Sample positive kinematic conditions that involve this candidate object
- if condition.STATE_NAME in KINEMATICS_STATES and positive and scene_obj in condition.body:
- # Use pybullet GUI for debugging
- if self.debug_obj_inst is not None and self.debug_obj_inst == condition.body[0]:
- igibson.debug_sampling = True
- obj_pos = obj.get_position()
- # Set the camera to have a bird's eye view of the sampling process
- p.resetDebugVisualizerCamera(
- cameraDistance=3.0,
- cameraYaw=0,
- cameraPitch=-89.99999,
- cameraTargetPosition=obj_pos,
- )
-
- success = condition.sample(binary_state=positive)
- log_msg = " ".join(
- [
- "{} condition sampling".format(condition_type),
- room_type,
- scene_obj,
- room_inst,
- obj.name,
- condition.STATE_NAME,
- str(condition.body),
- str(success),
- ]
- )
- logging.warning(log_msg)
-
- # If any condition fails for this candidate object, skip
- if not success:
- break
-
- # If this candidate object fails, move on to the next candidate object
- if not success:
- continue
-
- if room_inst not in filtered_object_scope[room_type][scene_obj]:
- filtered_object_scope[room_type][scene_obj][room_inst] = []
- filtered_object_scope[room_type][scene_obj][room_inst].append(obj)
-
- return filtered_object_scope
-
- def consolidate_room_instance(self, filtered_object_scope, condition_type):
- for room_type in filtered_object_scope:
- # For each room_type, keep only the room instances that have successful
- # sampling options for all obj_inst in this room_type
- room_inst_satisfied = set.intersection(
- *[
- set(filtered_object_scope[room_type][obj_inst].keys())
- for obj_inst in filtered_object_scope[room_type]
- ]
- )
-
- if len(room_inst_satisfied) == 0:
- error_msg = "{}: Room type [{}] of scene [{}] do not contain or cannot sample all the objects needed.\nThe following are the possible room instances for each object, the intersection of which is an empty set.\n".format(
- condition_type, room_type, self.scene.scene_id
- )
- for obj_inst in filtered_object_scope[room_type]:
- error_msg += (
- "{}: ".format(obj_inst) + ", ".join(filtered_object_scope[room_type][obj_inst].keys()) + "\n"
- )
-
- return error_msg
-
- for obj_inst in filtered_object_scope[room_type]:
- filtered_object_scope[room_type][obj_inst] = {
- key: val
- for key, val in filtered_object_scope[room_type][obj_inst].items()
- if key in room_inst_satisfied
- }
-
- def maximum_bipartite_matching(self, filtered_object_scope, condition_type):
- # For each room instance, perform maximum bipartite matching between object instances in scope and simulator objects
- # Left nodes: a list of object instances in scope
- # Right nodes: a list of simulator objects
- # Edges: whether the simulator object can support the sampling requirement of this object instance
- for room_type in filtered_object_scope:
- # The same room instances will be shared across all scene obj in a given room type
- some_obj = list(filtered_object_scope[room_type].keys())[0]
- room_insts = list(filtered_object_scope[room_type][some_obj].keys())
- success = False
- # Loop through each room instance
- for room_inst in room_insts:
- graph = nx.Graph()
- # For this given room instance, gather the mapping from obj instance to a list of simulator obj
- obj_inst_to_obj_per_room_inst = {}
- for obj_inst in filtered_object_scope[room_type]:
- obj_inst_to_obj_per_room_inst[obj_inst] = filtered_object_scope[room_type][obj_inst][room_inst]
- top_nodes = []
- log_msg = "MBM for room instance [{}]".format(room_inst)
- logging.warning((log_msg))
- for obj_inst in obj_inst_to_obj_per_room_inst:
- for obj in obj_inst_to_obj_per_room_inst[obj_inst]:
- # Create an edge between obj instance and each of the simulator obj that supports sampling
- graph.add_edge(obj_inst, obj)
- log_msg = "Adding edge: {} <-> {}".format(obj_inst, obj.name)
- logging.warning((log_msg))
- top_nodes.append(obj_inst)
- # Need to provide top_nodes that contain all nodes in one bipartite node set
- # The matches will have two items for each match (e.g. A -> B, B -> A)
- matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes)
- if len(matches) == 2 * len(obj_inst_to_obj_per_room_inst):
- logging.warning(("Object scope finalized:"))
- for obj_inst, obj in matches.items():
- if obj_inst in obj_inst_to_obj_per_room_inst:
- self.object_scope[obj_inst] = obj
- logging.warning((obj_inst, obj.name))
- success = True
- break
- if not success:
- return "{}: Room type [{}] of scene [{}] do not have enough simulator objects that can successfully sample all the objects needed. This is usually caused by specifying too many object instances in the object scope or the conditions are so stringent that too few simulator objects can satisfy them via sampling.\n".format(
- condition_type, room_type, self.scene.scene_id
- )
-
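For readers unfamiliar with the matching step above, the bipartite matching it performs can be reproduced in isolation roughly as follows; this is a standalone sketch (not part of the repository diff), with hypothetical object-instance and simulator-object names:

import networkx as nx

# Hypothetical candidates: which simulator objects can satisfy each object instance
candidates = {
    "chair.n.01_1": ["chair_12", "chair_47"],
    "chair.n.01_2": ["chair_47"],
}

graph = nx.Graph()
top_nodes = []
for obj_inst, sim_objs in candidates.items():
    top_nodes.append(obj_inst)
    for sim_obj in sim_objs:
        # An edge means: this simulator object can support this object instance
        graph.add_edge(obj_inst, sim_obj)

# maximum_matching reports both directions of every matched pair, which is why
# the code above checks len(matches) == 2 * number of object instances
matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes)
assignment = {k: v for k, v in matches.items() if k in candidates}
print(assignment)  # e.g. {'chair.n.01_1': 'chair_12', 'chair.n.01_2': 'chair_47'}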
- def sample_conditions(self, input_object_scope, conditions, condition_type):
- filtered_object_scope = self.filter_object_scope(input_object_scope, conditions, condition_type)
- error_msg = self.consolidate_room_instance(filtered_object_scope, condition_type)
- if error_msg:
- return error_msg, None
- return self.maximum_bipartite_matching(filtered_object_scope, condition_type), filtered_object_scope
-
- def sample_initial_conditions(self):
- error_msg, self.non_sampleable_object_scope_filtered_initial = self.sample_conditions(
- self.non_sampleable_object_scope, self.non_sampleable_obj_conditions, "initial"
- )
- return error_msg
-
- def sample_goal_conditions(self):
- np.random.shuffle(self.ground_goal_state_options)
- logging.warning(("number of ground_goal_state_options", len(self.ground_goal_state_options)))
- num_goal_condition_set_to_test = 10
-
- goal_condition_success = False
- # Try to fulfill different sets of ground goal conditions (at most num_goal_condition_set_to_test of them)
- for goal_condition_set in self.ground_goal_state_options[:num_goal_condition_set_to_test]:
- goal_condition_processed = []
- for condition in goal_condition_set:
- condition, positive = self.process_single_condition(condition)
- if condition is None:
- continue
- goal_condition_processed.append((condition, positive))
-
- error_msg, _ = self.sample_conditions(
- self.non_sampleable_object_scope_filtered_initial, goal_condition_processed, "goal"
- )
- if not error_msg:
- # if one set of goal conditions (together with the initial conditions) is satisfied, sampling is successful
- goal_condition_success = True
- break
-
- if not goal_condition_success:
- return error_msg
-
- def sample_initial_conditions_final(self):
- # Do the final round of sampling with object scope fixed
- for condition, positive in self.non_sampleable_obj_conditions:
- num_trials = 10
- for _ in range(num_trials):
- success = condition.sample(binary_state=positive)
- if success:
- break
- if not success:
- error_msg = "Non-sampleable object conditions failed even after successful matching: {}".format(
- condition.body
- )
- return error_msg
-
- # Use ray casting for ontop and inside sampling for sampleable objects
- for condition, positive in self.sampleable_obj_conditions:
- if condition.STATE_NAME in ["inside", "ontop"]:
- condition.kwargs["use_ray_casting_method"] = True
-
- if len(self.sampling_orders) > 0:
- # Pop non-sampleable objects
- self.sampling_orders.pop(0)
- for cur_batch in self.sampling_orders:
- # First sample non-sliced conditions
- for condition, positive in self.sampleable_obj_conditions:
- if condition.STATE_NAME == "sliced":
- continue
- # Sample conditions that involve the current batch of objects
- if condition.body[0] in cur_batch:
- num_trials = 100
- for _ in range(num_trials):
- success = condition.sample(binary_state=positive)
- if success:
- break
- if not success:
- return "Sampleable object conditions failed: {} {}".format(
- condition.STATE_NAME, condition.body
- )
-
- # Then sample sliced conditions
- for condition, positive in self.sampleable_obj_conditions:
- if condition.STATE_NAME != "sliced":
- continue
- # Sample conditions that involve the current batch of objects
- if condition.body[0] in cur_batch:
- success = condition.sample(binary_state=positive)
- if not success:
- return "Sampleable object conditions failed: {}".format(condition.body)
-
- def sample(self, env, validate_goal=True):
- # Before sampling, set robot to be far away
- env.robots[0].set_position_orientation([300, 300, 300], [0, 0, 0, 1])
- error_msg = self.group_initial_conditions()
- if error_msg:
- logging.warning(error_msg)
- return False, error_msg
-
- error_msg = self.sample_initial_conditions()
- if error_msg:
- logging.warning(error_msg)
- return False, error_msg
-
- if validate_goal:
- error_msg = self.sample_goal_conditions()
- if error_msg:
- logging.warning(error_msg)
- return False, error_msg
-
- error_msg = self.sample_initial_conditions_final()
- if error_msg:
- logging.warning(error_msg)
- return False, error_msg
-
- return True, None
-
- def import_non_colliding_objects(self, env, objects, existing_objects=[], min_distance=0.5):
- """
- Loads objects into the scene such that they don't collide with existing objects.
-
- :param env: iGibsonEnv
- :param objects: A dictionary with objects, from a scene loaded with a particular URDF
- :param existing_objects: A list of objects that need to be kept at least min_distance away when loading the new objects
- :param min_distance: A minimum distance to require for objects to load
- """
- state_id = p.saveState()
- objects_to_add = []
-
- for obj_name in objects:
- obj = objects[obj_name]
-
- # Do not allow duplicate object categories
- if obj.category in env.simulator.scene.objects_by_category:
- continue
-
- add = True
- body_ids = []
-
- # Filter based on the minimum distance to any existing object
- for idx in range(len(obj.urdf_paths)):
- body_id = p.loadURDF(obj.urdf_paths[idx])
- body_ids.append(body_id)
- transformation = obj.poses[idx]
- pos = transformation[0:3, 3]
- orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3]))
- dynamics_info = p.getDynamicsInfo(body_id, -1)
- inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
- pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)
- pos = list(pos)
- min_distance_to_existing_object = None
- for existing_object in existing_objects:
- # If a sliced obj is an existing_object, get_position will not work
- if isinstance(existing_object, ObjectMultiplexer) and isinstance(
- existing_object.current_selection(), ObjectGrouper
- ):
- obj_pos = np.array([obj.get_position() for obj in existing_object.objects]).mean(axis=0)
- else:
- obj_pos = existing_object.get_position()
- distance = np.linalg.norm(np.array(pos) - np.array(obj_pos))
- if min_distance_to_existing_object is None or min_distance_to_existing_object > distance:
- min_distance_to_existing_object = distance
-
- if min_distance_to_existing_object < min_distance:
- add = False
- break
-
- pos[2] += 0.01 # slightly above so it does not touch furniture
- p.resetBasePositionAndOrientation(body_id, pos, orn)
-
- # Filter based on collisions with any existing object
- if add:
- p.stepSimulation()
-
- for body_id in body_ids:
- in_collision = len(p.getContactPoints(body_id)) > 0
- if in_collision:
- add = False
- break
-
- if add:
- objects_to_add.append(obj)
-
- for body_id in body_ids:
- p.removeBody(body_id)
-
- restoreState(state_id)
-
- p.removeState(state_id)
-
- for obj in objects_to_add:
- env.simulator.import_object(obj)
-
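As a side note, the pose handling in import_non_colliding_objects above (converting a 4x4 world transform to a pybullet base pose by composing it with the local inertial frame) can be summarized in a small helper; this is a sketch only, assuming an already-loaded body_id and a homogeneous transform T, and using transforms3d for the matrix-to-quaternion conversion:

import numpy as np
import pybullet as p
from transforms3d.quaternions import mat2quat


def place_body_at_link_pose(body_id, T):
    # T is a 4x4 homogeneous world transform of the object's base *link* frame
    pos = T[0:3, 3]
    w, x, y, z = mat2quat(T[0:3, 0:3])  # transforms3d returns wxyz
    orn = [x, y, z, w]  # pybullet expects xyzw
    # resetBasePositionAndOrientation acts on the inertial frame, so compose
    # the link-frame pose with the local inertial offset first
    inertial_pos, inertial_orn = p.getDynamicsInfo(body_id, -1)[3:5]
    pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)
    p.resetBasePositionAndOrientation(body_id, pos, orn)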
- def clutter_scene(self, env):
- """
- Load clutter objects into the scene from a random clutter scene
- """
- scene_id = self.scene.scene_id
- clutter_ids = [""] + list(range(2, 5))
- clutter_id = np.random.choice(clutter_ids)
- clutter_scene = InteractiveIndoorScene(scene_id, "{}_clutter{}".format(scene_id, clutter_id))
- existing_objects = [value for key, value in self.object_scope.items() if "floor.n.01" not in key]
- self.import_non_colliding_objects(
- env=env, objects=clutter_scene.objects_by_name, existing_objects=existing_objects, min_distance=0.5
- )
-
- def get_task_obs(self, env):
- state = OrderedDict()
- task_obs = np.zeros((self.task_obs_dim))
- state["robot_pos"] = np.array(env.robots[0].get_position())
- state["robot_orn"] = np.array(env.robots[0].get_rpy())
-
- i = 0
- for _, v in self.object_scope.items():
- if isinstance(v, URDFObject):
- state["obj_{}_valid".format(i)] = 1.0
- state["obj_{}_pos".format(i)] = np.array(v.get_position())
- state["obj_{}_orn".format(i)] = np.array(p.getEulerFromQuaternion(v.get_orientation()))
- grasping_objects = env.robots[0].is_grasping(v.get_body_id())
- for grasp_idx, grasping in enumerate(grasping_objects):
- state["obj_{}_pos_in_gripper_{}".format(i, grasp_idx)] = float(grasping)
- i += 1
-
- state_list = []
- for k, v in state.items():
- if isinstance(v, list):
- state_list.extend(v)
- elif isinstance(v, tuple):
- state_list.extend(list(v))
- elif isinstance(v, np.ndarray):
- state_list.extend(list(v))
- elif isinstance(v, (float, int)):
- state_list.append(v)
- else:
- raise ValueError("cannot serialize task obs")
-
- assert len(state_list) <= len(task_obs)
- task_obs[: len(state_list)] = state_list
-
- return task_obs
-
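The padding scheme in get_task_obs above (flatten heterogeneous entries, then zero-pad to a fixed length) amounts to the following minimal sketch; task_obs_dim and the entries are made up for illustration:

from collections import OrderedDict

import numpy as np

task_obs_dim = 12  # hypothetical fixed observation size
state = OrderedDict(robot_pos=np.zeros(3), robot_orn=np.zeros(3), obj_0_valid=1.0)

state_list = []
for v in state.values():
    state_list.extend(np.atleast_1d(v).tolist())

task_obs = np.zeros(task_obs_dim)
task_obs[: len(state_list)] = state_list  # unused slots stay at zero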
- def check_success(self):
- self.current_success, self.current_goal_status = evaluate_goal_conditions(self.goal_conditions)
- return self.current_success, self.current_goal_status
-
- def get_termination(self, env, collision_links=[], action=None, info={}):
- """
- Aggregate termination conditions and fill info
- """
- done, info = super(BehaviorTask, self).get_termination(env, collision_links, action, info)
- info["goal_status"] = self.current_goal_status
- return done, info
-
- def show_instruction(self):
- satisfied = self.currently_viewed_instruction in self.current_goal_status["satisfied"]
- natural_language_condition = self.natural_language_goal_conditions[self.currently_viewed_instruction]
- objects = self.goal_conditions[self.currently_viewed_instruction].get_relevant_objects()
- text_color = (
- [83.0 / 255.0, 176.0 / 255.0, 72.0 / 255.0] if satisfied else [255.0 / 255.0, 51.0 / 255.0, 51.0 / 255.0]
- )
-
- return natural_language_condition, text_color, objects
-
- def iterate_instruction(self):
- self.currently_viewed_index = (self.currently_viewed_index + 1) % len(self.conds.parsed_goal_conditions)
- self.currently_viewed_instruction = self.instruction_order[self.currently_viewed_index]
diff --git a/igibson/tasks/dummy_task.py b/igibson/tasks/dummy_task.py
deleted file mode 100644
index 171360947..000000000
--- a/igibson/tasks/dummy_task.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import numpy as np
-
-from igibson.tasks.task_base import BaseTask
-
-
-class DummyTask(BaseTask):
- """
- Dummy Task
- A placeholder task with no scene or agent reset logic and a zero-dimensional task observation
- """
-
- def __init__(self, env):
- super(DummyTask, self).__init__(env)
- self.task_obs_dim = 0
-
- def reset_scene(self, env):
- """
- Task-specific scene reset: reset scene objects or floor plane
-
- :param env: environment instance
- """
- return
-
- def reset_agent(self, env):
- """
- Task-specific agent reset: land the robot to initial pose, compute initial potential
-
- :param env: environment instance
- """
- return
-
- def get_task_obs(self, env):
- """
- Get task-specific observation, including goal position, current velocities, etc.
-
- :param env: environment instance
- :return: task-specific observation
- """
- return np.zeros(0, dtype=np.float32)
diff --git a/igibson/tasks/dynamic_nav_random_task.py b/igibson/tasks/dynamic_nav_random_task.py
index 58fba6940..42fb8c8bc 100644
--- a/igibson/tasks/dynamic_nav_random_task.py
+++ b/igibson/tasks/dynamic_nav_random_task.py
@@ -1,9 +1,8 @@
import numpy as np
import pybullet as p
-from igibson.robots.turtlebot import Turtlebot
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.tasks.point_nav_random_task import PointNavRandomTask
-from igibson.utils.utils import restoreState
class DynamicNavRandomTask(PointNavRandomTask):
@@ -49,7 +48,7 @@ def reset_dynamic_objects(self, env):
_, pos = env.scene.get_random_point(floor=self.floor_num)
orn = np.array([0, 0, np.random.uniform(0, np.pi * 2)])
reset_success = env.test_valid_position(robot, pos, orn)
- restoreState(state_id)
+ p.restoreState(state_id)
if reset_success:
break
diff --git a/igibson/tasks/interactive_nav_random_task.py b/igibson/tasks/interactive_nav_random_task.py
index bc63f9da6..c5e8e4adf 100644
--- a/igibson/tasks/interactive_nav_random_task.py
+++ b/igibson/tasks/interactive_nav_random_task.py
@@ -3,7 +3,6 @@
from igibson.objects.ycb_object import YCBObject
from igibson.tasks.point_nav_random_task import PointNavRandomTask
-from igibson.utils.utils import restoreState
class InteractiveNavRandomTask(PointNavRandomTask):
@@ -15,7 +14,7 @@ class InteractiveNavRandomTask(PointNavRandomTask):
def __init__(self, env):
super(InteractiveNavRandomTask, self).__init__(env)
self.interactive_objects = self.load_interactive_objects(env)
- env.collision_ignore_body_b_ids |= set([obj.get_body_id() for obj in self.interactive_objects])
+ env.collision_ignore_body_b_ids |= set([obj.body_id for obj in self.interactive_objects])
def load_interactive_objects(self, env):
"""
@@ -54,7 +53,7 @@ def reset_interactive_objects(self, env):
_, pos = env.scene.get_random_point(floor=self.floor_num)
orn = np.array([0, 0, np.random.uniform(0, np.pi * 2)])
reset_success = env.test_valid_position(obj, pos, orn)
- restoreState(state_id)
+ p.restoreState(state_id)
if reset_success:
break
diff --git a/igibson/tasks/point_nav_fixed_task.py b/igibson/tasks/point_nav_fixed_task.py
index c86e13115..7c9f5dd5d 100644
--- a/igibson/tasks/point_nav_fixed_task.py
+++ b/igibson/tasks/point_nav_fixed_task.py
@@ -42,8 +42,8 @@ def __init__(self, env):
self.goal_format = self.config.get("goal_format", "polar")
self.dist_tol = self.termination_conditions[-1].dist_tol
- self.visible_target = self.config.get("visible_target", False)
- self.visible_path = self.config.get("visible_path", False)
+ self.visual_object_at_initial_target_pos = self.config.get("visual_object_at_initial_target_pos", True)
+ self.target_visual_object_visible_to_agent = self.config.get("target_visual_object_visible_to_agent", False)
self.floor_num = 0
self.load_visualization(env)
@@ -54,7 +54,7 @@ def load_visualization(self, env):
:param env: environment instance
"""
- if env.mode != "gui_interactive":
+ if env.mode != "gui":
return
cyl_length = 0.2
@@ -73,16 +73,12 @@ def load_visualization(self, env):
initial_offset=[0, 0, cyl_length / 2.0],
)
- env.simulator.import_object(self.initial_pos_vis_obj)
- env.simulator.import_object(self.target_pos_vis_obj)
-
- # The visual object indicating the initial location is always hidden
- for instance in self.initial_pos_vis_obj.renderer_instances:
- instance.hidden = True
-
- # The visual object indicating the target location may be visible
- for instance in self.target_pos_vis_obj.renderer_instances:
- instance.hidden = not self.visible_target
+ if self.target_visual_object_visible_to_agent:
+ env.simulator.import_object(self.initial_pos_vis_obj)
+ env.simulator.import_object(self.target_pos_vis_obj)
+ else:
+ self.initial_pos_vis_obj.load()
+ self.target_pos_vis_obj.load()
if env.scene.build_graph:
self.num_waypoints_vis = 250
@@ -97,10 +93,7 @@ def load_visualization(self, env):
for _ in range(self.num_waypoints_vis)
]
for waypoint in self.waypoints_vis:
- env.simulator.import_object(waypoint)
- # The path to the target may be visible
- for instance in waypoint.renderer_instances:
- instance.hidden = not self.visible_path
+ waypoint.load()
def get_geodesic_potential(self, env):
"""
@@ -151,11 +144,11 @@ def reset_agent(self, env):
:param env: environment instance
"""
env.land(env.robots[0], self.initial_pos, self.initial_orn)
-
- def reset_variables(self, env):
self.path_length = 0.0
self.robot_pos = self.initial_pos[:2]
self.geodesic_dist = self.get_geodesic_potential(env)
+ for reward_function in self.reward_functions:
+ reward_function.reset(self, env)
def get_termination(self, env, collision_links=[], action=None, info={}):
"""
@@ -222,7 +215,7 @@ def step_visualization(self, env):
:param env: environment instance
"""
- if env.mode != "gui_interactive":
+ if env.mode != "gui":
return
self.initial_pos_vis_obj.set_position(self.initial_pos)
diff --git a/igibson/tasks/point_nav_random_task.py b/igibson/tasks/point_nav_random_task.py
index 7c93fadcb..d4ae96133 100644
--- a/igibson/tasks/point_nav_random_task.py
+++ b/igibson/tasks/point_nav_random_task.py
@@ -4,7 +4,7 @@
import pybullet as p
from igibson.tasks.point_nav_fixed_task import PointNavFixedTask
-from igibson.utils.utils import l2_distance, restoreState
+from igibson.utils.utils import l2_distance
class PointNavRandomTask(PointNavFixedTask):
@@ -39,10 +39,8 @@ def sample_initial_pose_and_target_pos(self, env):
if self.target_dist_min < dist < self.target_dist_max:
break
if not (self.target_dist_min < dist < self.target_dist_max):
- logging.warning("Failed to sample initial and target positions")
+ print("WARNING: Failed to sample initial and target positions")
initial_orn = np.array([0, 0, np.random.uniform(0, np.pi * 2)])
- logging.info("Sampled initial pose: {}, {}".format(initial_pos, initial_orn))
- logging.info("Sampled target position: {}".format(target_pos))
return initial_pos, initial_orn, target_pos
def reset_scene(self, env):
@@ -72,7 +70,7 @@ def reset_agent(self, env):
reset_success = env.test_valid_position(
env.robots[0], initial_pos, initial_orn
) and env.test_valid_position(env.robots[0], target_pos)
- restoreState(state_id)
+ p.restoreState(state_id)
if reset_success:
break
diff --git a/igibson/tasks/room_rearrangement_task.py b/igibson/tasks/room_rearrangement_task.py
index 2313fb7a9..6ce68a947 100644
--- a/igibson/tasks/room_rearrangement_task.py
+++ b/igibson/tasks/room_rearrangement_task.py
@@ -9,7 +9,6 @@
from igibson.termination_conditions.max_collision import MaxCollision
from igibson.termination_conditions.out_of_bound import OutOfBound
from igibson.termination_conditions.timeout import Timeout
-from igibson.utils.utils import restoreState
class RoomRearrangementTask(BaseTask):
@@ -102,7 +101,7 @@ def reset_agent(self, env):
for _ in range(max_trials):
initial_pos, initial_orn = self.sample_initial_pose(env)
reset_success = env.test_valid_position(env.robots[0], initial_pos, initial_orn)
- restoreState(state_id)
+ p.restoreState(state_id)
if reset_success:
break
@@ -112,6 +111,9 @@ def reset_agent(self, env):
env.land(env.robots[0], initial_pos, initial_orn)
p.removeState(state_id)
+ for reward_function in self.reward_functions:
+ reward_function.reset(self, env)
+
def get_task_obs(self, env):
"""
No task-specific observation
diff --git a/igibson/tasks/task_base.py b/igibson/tasks/task_base.py
index a20e2f617..90e242725 100644
--- a/igibson/tasks/task_base.py
+++ b/igibson/tasks/task_base.py
@@ -34,23 +34,6 @@ def reset_agent(self, env):
"""
raise NotImplementedError()
- def reset_variables(self, env):
- """
- Task-specific variable reset
-
- :param env: environment instance
- """
- return
-
- def reset(self, env):
- self.reset_scene(env)
- self.reset_agent(env)
- self.reset_variables(env)
- for reward_function in self.reward_functions:
- reward_function.reset(self, env)
- for termination_condition in self.termination_conditions:
- termination_condition.reset(self, env)
-
def get_reward(self, env, collision_links=[], action=None, info={}):
"""
Aggregate reward functions
diff --git a/igibson/termination_conditions/predicate_goal.py b/igibson/termination_conditions/predicate_goal.py
deleted file mode 100644
index e261ea4ff..000000000
--- a/igibson/termination_conditions/predicate_goal.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from bddl.activity import evaluate_goal_conditions
-
-from igibson.termination_conditions.termination_condition_base import BaseTerminationCondition
-from igibson.utils.utils import l2_distance
-
-
-class PredicateGoal(BaseTerminationCondition):
- """
- PredicateGoal used for BehaviorTask
- Episode terminates if all the predicates are satisfied
- """
-
- def __init__(self, config):
- super(PredicateGoal, self).__init__(config)
-
- def get_termination(self, task, env):
- """
- Return whether the episode should terminate.
- Terminate if point goal is reached (distance below threshold)
-
- :param task: task instance
- :param env: environment instance
- :return: done, info
- """
- done, _ = evaluate_goal_conditions(task.goal_conditions)
- success = done
- return done, success
diff --git a/igibson/termination_conditions/termination_condition_base.py b/igibson/termination_conditions/termination_condition_base.py
index 91fc49ac2..e01329569 100644
--- a/igibson/termination_conditions/termination_condition_base.py
+++ b/igibson/termination_conditions/termination_condition_base.py
@@ -22,12 +22,3 @@ def get_termination(self, task, env):
:return: done, info
"""
raise NotImplementedError()
-
- def reset(self, task, env):
- """
- Termination condition-specific reset
-
- :param task: task instance
- :param env: environment instance
- """
- return
diff --git a/tests/benchmark/benchmark_interactive_scene.py b/igibson/test/benchmark/benchmark_interactive_scene.py
similarity index 96%
rename from tests/benchmark/benchmark_interactive_scene.py
rename to igibson/test/benchmark/benchmark_interactive_scene.py
index 7c1861340..a3d591e30 100644
--- a/tests/benchmark/benchmark_interactive_scene.py
+++ b/igibson/test/benchmark/benchmark_interactive_scene.py
@@ -8,7 +8,7 @@
import igibson
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.robots.turtlebot import Turtlebot
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import get_ig_assets_version
@@ -16,7 +16,7 @@
def benchmark_scene(scene_name, optimized=False, import_robot=True):
- config = parse_config(os.path.join(os.path.dirname(__file__), "..", "test.yaml"))
+ config = parse_config(os.path.join(igibson.root_path, "test", "test.yaml"))
assets_version = get_ig_assets_version()
print("assets_version", assets_version)
scene = InteractiveIndoorScene(
@@ -31,7 +31,7 @@ def benchmark_scene(scene_name, optimized=False, import_robot=True):
rendering_settings=settings,
)
start = time.time()
- s.import_scene(scene)
+ s.import_ig_scene(scene)
print(time.time() - start)
if import_robot:
diff --git a/tests/benchmark/benchmark_interactive_scene_rendering.py b/igibson/test/benchmark/benchmark_interactive_scene_rendering.py
similarity index 95%
rename from tests/benchmark/benchmark_interactive_scene_rendering.py
rename to igibson/test/benchmark/benchmark_interactive_scene_rendering.py
index d81960f45..da5e7b295 100644
--- a/tests/benchmark/benchmark_interactive_scene_rendering.py
+++ b/igibson/test/benchmark/benchmark_interactive_scene_rendering.py
@@ -8,7 +8,7 @@
import numpy as np
import igibson
-from igibson.robots.turtlebot import Turtlebot
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import get_ig_assets_version
@@ -17,7 +17,7 @@
def benchmark_rendering(scene_list, rendering_presets_list, modality_list):
- config = parse_config(os.path.join(os.path.dirname(__file__), "..", "test.yaml"))
+ config = parse_config(os.path.join(igibson.root_path, "test", "test.yaml"))
assets_version = get_ig_assets_version()
print("assets_version", assets_version)
result = {}
@@ -39,7 +39,7 @@ def benchmark_rendering(scene_list, rendering_presets_list, modality_list):
rendering_settings=settings,
physics_timestep=1 / 240.0,
)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
turtlebot = Turtlebot(config)
s.import_robot(turtlebot)
diff --git a/tests/benchmark/benchmark_pbr.py b/igibson/test/benchmark/benchmark_pbr.py
similarity index 97%
rename from tests/benchmark/benchmark_pbr.py
rename to igibson/test/benchmark/benchmark_pbr.py
index e88f08fbe..e167882c2 100644
--- a/tests/benchmark/benchmark_pbr.py
+++ b/igibson/test/benchmark/benchmark_pbr.py
@@ -21,7 +21,7 @@ def benchmark(render_to_tensor=False, resolution=512, obj_num=100, optimized=Tru
renderer = MeshRenderer(width=resolution, height=resolution, vertical_fov=90, rendering_settings=settings)
renderer.load_object("plane/plane_z_up_0.obj", scale=[3, 3, 3])
- renderer.add_instance_group([0])
+ renderer.add_instance(0)
renderer.instances[-1].use_pbr = True
renderer.instances[-1].use_pbr_mapping = True
renderer.set_pose([0, 0, -1.5, 1, 0, 0.0, 0.0], -1)
@@ -47,7 +47,7 @@ def benchmark(render_to_tensor=False, resolution=512, obj_num=100, optimized=Tru
renderer.load_object(os.path.join(model_path, fn), scale=[scale, scale, scale])
for obj_i in range(obj_count_x):
for obj_j in range(obj_count_x):
- renderer.add_instance_group([i])
+ renderer.add_instance(i)
renderer.set_pose(
[
obj_i - obj_count_x / 2.0,
diff --git a/tests/benchmark/benchmark_static_scene.py b/igibson/test/benchmark/benchmark_static_scene.py
similarity index 89%
rename from tests/benchmark/benchmark_static_scene.py
rename to igibson/test/benchmark/benchmark_static_scene.py
index 86d76c3c8..7ba972467 100644
--- a/tests/benchmark/benchmark_static_scene.py
+++ b/igibson/test/benchmark/benchmark_static_scene.py
@@ -4,19 +4,15 @@
import matplotlib.pyplot as plt
import igibson
-from igibson.robots.turtlebot import Turtlebot
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.simulator import Simulator
from igibson.utils.utils import parse_config
def benchmark(render_to_tensor=False, resolution=512):
- config = parse_config(os.path.join(os.path.dirname(__file__), "..", "test.yaml"))
- if render_to_tensor:
- mode = "headless_tensor"
- else:
- mode = "headless"
- s = Simulator(mode=mode, image_width=resolution, image_height=resolution)
+ config = parse_config(os.path.join(igibson.root_path, "test", "test.yaml"))
+ s = Simulator(mode="headless", image_width=resolution, image_height=resolution, render_to_tensor=render_to_tensor)
scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=True)
s.import_scene(scene)
turtlebot = Turtlebot(config)
diff --git a/igibson/test/disabled/test_amazon_avg_obj_dims.py b/igibson/test/disabled/test_amazon_avg_obj_dims.py
new file mode 100644
index 000000000..6b26c1a17
--- /dev/null
+++ b/igibson/test/disabled/test_amazon_avg_obj_dims.py
@@ -0,0 +1,355 @@
+import json
+import os
+import time
+import urllib.request
+
+import numpy as np
+from mws import mws
+
+# More documentation can be found here: https://docs.google.com/document/d/1sTkbLHr-tNhkGnGwpl1SWxjzFHy87PLzu0FmwPXlqKQ/edit?usp=sharing
+# The handpicked ASINs and the object dims for NOT_IN_AMAZON can be found here: https://docs.google.com/spreadsheets/d/1D5LJDqY0hzyCuMSSZS34nP0H7Vi6Kq2RxQWon2Sg_mg/edit?usp=sharing
+
+# These object classes DO exist Amazon and we can retrieve a list of object
+# instances that are close to our object models through a direct keyword search
+DIRECT_QUERY = [
+ "basket",
+ "bathtub",
+ "bench",
+ "bottom_cabinet",
+ "bottom_cabinet_no_top",
+ "carpet",
+ "chair",
+ "chest",
+ "coffee_machine",
+ "coffee_table",
+ "console_table",
+ "cooktop",
+ "crib",
+ "cushion",
+ "dishwasher",
+ "dryer",
+ "fence",
+ "floor_lamp",
+ "fridge",
+ "grandfather_clock",
+ "guitar",
+ "heater",
+ "laptop",
+ "loudspeaker",
+ "microwave",
+ "mirror",
+ "monitor",
+ "office_chair",
+ "oven",
+ "picture",
+ "plant",
+ "pool_table",
+ "range_hood",
+ "shelf",
+ "shower",
+ "sink",
+ "sofa",
+ "sofa_chair",
+ "speaker_system",
+ "standing_tv",
+ "stool",
+ "stove",
+ "table",
+ "table_lamp",
+ "toilet",
+ "top_cabinet",
+ "towel_rack",
+ "trash_can",
+ "treadmill",
+ "wall_clock",
+ "wall_mounted_tv",
+ "washer",
+]
+
+DIRECT_QUERY_SUB = {
+ "bathtub": "freestanding bathtub",
+ "bottom cabinet": "base cabinet",
+ "bottom cabinet_no_top": "base cabinet",
+ "carpet": "rug",
+ "cushion": "pillow",
+ "dryer": "front load dryer",
+ "fridge": "top freezer fridge",
+ "heater": "wall heater",
+ "mirror": "wall mirror",
+ "oven": "wall oven",
+ "picture": "canvas art",
+ "range hood": "wall mount range hood",
+ "shelf": "book shelf",
+ "shower": "corner steam shower",
+ "sink": "bathroom sink",
+ "sofa chair": "arm chair",
+ "speaker system": "home speaker system",
+ "standing tv": "tv",
+ "stove": "gas range",
+ "table": "dining table",
+ "top cabinet": "wall mounted cabinet",
+ "wall_mounted tv": "tv",
+ "washer": "front load washing machine",
+}
+
+# These object classes DO NOT exist on Amazon. Need to find info manually.
+NOT_IN_AMAZON = [
+ "bed",
+ "counter",
+ "door",
+ "piano",
+ "window",
+]
+
+
+def get_product_api():
+ access_key = "AKIAJHPQJIW6QR5LWITQ"
+ seller_id = "A1Q8GZYLKNKOGP"
+ secret_key = "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"
+ marketplace_usa = "ATVPDKIKX0DER"
+ products_api = mws.Products(access_key, secret_key, seller_id, region="US")
+ return products_api, marketplace_usa
+
+
+def query_amazon(products_api, marketplace_usa):
+ root_dir = "/cvgl2/u/chengshu/ig_dataset"
+ obj_dir = os.path.join(root_dir, "objects")
+ obj_dim_dir = os.path.join(root_dir, "object_dims")
+
+ for obj_class in sorted(os.listdir(obj_dir)):
+ obj_class_query = obj_class.replace("_", " ")
+ if obj_class not in ["top_cabinet", "bottom_cabinet"]:
+ continue
+ if obj_class not in DIRECT_QUERY:
+ continue
+ if obj_class_query in DIRECT_QUERY_SUB:
+ obj_class_query = DIRECT_QUERY_SUB[obj_class_query]
+ obj_class_dir = os.path.join(obj_dim_dir, obj_class)
+ os.makedirs(obj_class_dir, exist_ok=True)
+ products = products_api.list_matching_products(marketplaceid=marketplace_usa, query=obj_class_query)
+ assert products.response.status_code == 200, "API failed"
+
+ valid_item = 0
+ dims = []
+ for product in products.parsed.Products.Product:
+ # no valid dimension, skip
+ if "ItemDimensions" not in product.AttributeSets.ItemAttributes:
+ continue
+
+ item_dim = product.AttributeSets.ItemAttributes.ItemDimensions
+ item_dim_keys = item_dim.keys()
+ has_valid_dim = (
+ "Width" in item_dim_keys
+ and "Length" in item_dim_keys
+ and "Height" in item_dim_keys
+ and "Weight" in item_dim_keys
+ )
+ # no valid dimension, skip
+ if not has_valid_dim:
+ continue
+
+ # no thumbnail image, skip
+ if "SmallImage" not in product.AttributeSets.ItemAttributes:
+ continue
+
+ ASIN = product.Identifiers.MarketplaceASIN.ASIN
+
+ item_dim_array = [
+ item_dim.Width.value,
+ item_dim.Length.value,
+ item_dim.Height.value,
+ item_dim.Weight.value,
+ ]
+ item_dim_array = [float(elem) for elem in item_dim_array]
+ if obj_class in [
+ "bathtub",
+ "bench",
+ "chest",
+ "coffee_table",
+ "console_table",
+ "cooktop",
+ "crib",
+ "cushion",
+ "fence",
+ "heater",
+ "laptop",
+ "microwave",
+ "monitor",
+ "pool_table",
+ "range_hood",
+ "shelf",
+ "shower",
+ "sofa",
+ "table",
+ "towel_rack",
+ "standing_tv",
+ "wall_mounted_tv",
+ ]:
+ short_side = min(item_dim_array[:2])
+ long_side = max(item_dim_array[:2])
+ item_dim_array[0] = long_side
+ item_dim_array[1] = short_side
+ elif obj_class in ["table_lamp"]:
+ short_side = min(item_dim_array[:2])
+ long_side = max(item_dim_array[:2])
+ item_dim_array[0] = short_side
+ item_dim_array[1] = long_side
+ else:
+ short_side, long_side, longest_side = sorted(item_dim_array[:3])
+ if obj_class in ["guitar", "mirror", "picture", "wall_clock"]:
+ item_dim_array[0] = long_side
+ item_dim_array[1] = short_side
+ item_dim_array[2] = longest_side
+ elif obj_class in ["carpet"]:
+ item_dim_array[0] = longest_side
+ item_dim_array[1] = long_side
+ item_dim_array[2] = short_side
+
+ img_url = product.AttributeSets.ItemAttributes.SmallImage.URL
+
+ dim_txt = os.path.join(obj_class_dir, ASIN + ".txt")
+ with open(dim_txt, "w+") as f:
+ f.write(" ".join([str(elem) for elem in item_dim_array]))
+ img_path = os.path.join(obj_class_dir, ASIN + ".jpg")
+ urllib.request.urlretrieve(img_url, img_path)
+ valid_item += 1
+ dims.append(item_dim_array)
+
+ dims = np.array(dims)
+ # inch to cm
+ dims[:, :3] *= 2.54
+ # pound to kg
+ dims[:, 3] *= 0.453592
+ print(f"{obj_class} {obj_class_query}: {valid_item} valid items.")
+ print(
+ "avg W x L x H (in cm), weight (in kg): {} {} {} {}".format(
+ np.median(dims[:, 0]), np.median(dims[:, 1]), np.median(dims[:, 2]), np.median(dims[:, 3])
+ )
+ )
+ # The request quota is one request every 5 seconds.
+ time.sleep(6.0)
+
+
+def check_weight():
+ root_dir = "/cvgl2/u/chengshu/ig_dataset"
+ obj_dim_dir = os.path.join(root_dir, "object_dims")
+ non_amazon_csv = os.path.join(obj_dim_dir, "non_amazon.csv")
+ assert os.path.isfile(
+ non_amazon_csv
+ ), f"please download non-amazon dimensions and put it in this path: {non_amazon_csv}"
+
+ obj_dim_dict = {}
+ for obj_class in sorted(os.listdir(obj_dim_dir)):
+ obj_dir = os.path.join(obj_dim_dir, obj_class)
+ if not os.path.isdir(obj_dir):
+ continue
+ dims = []
+ for txt_file in os.listdir(obj_dir):
+ if not txt_file.endswith(".txt"):
+ continue
+ txt_file = os.path.join(obj_dir, txt_file)
+ with open(txt_file) as f:
+ dims.append([float(item) for item in f.readline().strip().split()])
+ dims = np.array(dims)
+ # inch to cm
+ dims[:, :3] *= 2.54
+ # pound to kg
+ dims[:, 3] *= 0.453592
+ print(f"{obj_class}: {len(dims)} valid items.")
+ print(
+ "avg W x L x H (in cm), weight (in kg): {} {} {} {}".format(
+ np.median(dims[:, 0]), np.median(dims[:, 1]), np.median(dims[:, 2]), np.median(dims[:, 3])
+ )
+ )
+ obj_dim_dict[obj_class] = {
+ "size": [np.median(dims[:, 0] * 0.01), np.median(dims[:, 1] * 0.01), np.median(dims[:, 2] * 0.01)],
+ "mass": np.median(dims[:, 3]),
+ }
+
+ with open(non_amazon_csv) as f:
+ # skip first row
+ f.readline()
+ for line in f.readlines():
+ line = line.strip().split(",")
+ obj_class, width, length, height, weight = line
+ width, length, height, weight = (
+ float(width) * 2.54,
+ float(length) * 2.54,
+ float(height) * 2.54,
+ float(weight) * 0.453592,
+ )
+ print(f"{obj_class}:")
+ print("avg W x L x H (in cm), weight (in kg): {} {} {} {}".format(width, length, height, weight))
+ obj_dim_dict[obj_class] = {"size": [width * 0.01, length * 0.01, height * 0.01], "mass": weight}
+
+ for obj_class in obj_dim_dict:
+ volume = np.prod(obj_dim_dict[obj_class]["size"])
+ obj_dim_dict[obj_class]["density"] = obj_dim_dict[obj_class]["mass"] / volume
+
+ avg_obj_dims = os.path.join(obj_dim_dir, "avg_obj_dims.json")
+ with open(avg_obj_dims, "w+") as f:
+ json.dump(obj_dim_dict, f)
+
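The unit conversions and bounding-box density used in check_weight above boil down to the following standalone sketch; the dimensions are a made-up example, not data from the spreadsheet:

import numpy as np

# Hypothetical entries: width, length, height in inches, weight in pounds
dims = np.array([[24.0, 18.0, 30.0, 45.0], [26.0, 20.0, 32.0, 50.0]])
dims[:, :3] *= 2.54  # inches -> centimeters
dims[:, 3] *= 0.453592  # pounds -> kilograms

size_m = np.median(dims[:, :3], axis=0) * 0.01  # centimeters -> meters
mass_kg = np.median(dims[:, 3])
density = mass_kg / np.prod(size_m)  # kg per m^3 of the bounding box
print(size_m, mass_kg, density)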
+
+# For testing purposes
+def query_single_product(products_api, marketplace_usa, obj_class_query):
+ products = products_api.list_matching_products(marketplaceid=marketplace_usa, query=obj_class_query)
+ assert products.response.status_code == 200, "API failed"
+
+ dims = []
+ for product in products.parsed.Products.Product:
+ # no valid dimension, skip
+ if "ItemDimensions" not in product.AttributeSets.ItemAttributes:
+ continue
+
+ item_dim = product.AttributeSets.ItemAttributes.ItemDimensions
+ item_dim_keys = item_dim.keys()
+ has_valid_dim = (
+ "Width" in item_dim_keys
+ and "Length" in item_dim_keys
+ and "Height" in item_dim_keys
+ and "Weight" in item_dim_keys
+ )
+ # no valid dimension, skip
+ if not has_valid_dim:
+ continue
+
+ # no thumbnail image, skip
+ if "SmallImage" not in product.AttributeSets.ItemAttributes:
+ continue
+
+ item_dim_array = [
+ item_dim.Width.value,
+ item_dim.Length.value,
+ item_dim.Height.value,
+ item_dim.Weight.value,
+ ]
+ dims.append([float(item) for item in item_dim_array])
+ ASIN = product.Identifiers.MarketplaceASIN.ASIN
+ print(ASIN)
+
+ dims = np.array(dims)
+ # inch to cm
+ dims[:, :3] *= 2.54
+ # pound to kg
+ dims[:, 3] *= 0.453592
+ print(dims)
+ print(
+ "{} avg W x L x H (in cm), weight (in kg): {} {} {} {}".format(
+ obj_class_query, np.median(dims[:, 0]), np.median(dims[:, 1]), np.median(dims[:, 2]), np.median(dims[:, 3])
+ )
+ )
+
+
+def main():
+ products_api, marketplace_usa = get_product_api()
+ # query_single_product(products_api, marketplace_usa,
+ # 'base cabinet')
+ query_amazon(products_api, marketplace_usa)
+ # manually download the csv file for non-Amazon products from
+ # https://docs.google.com/spreadsheets/d/1D5LJDqY0hzyCuMSSZS34nP0H7Vi6Kq2RxQWon2Sg_mg/edit#gid=0
+ check_weight()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/test/disabled/test_joint_limit.py b/igibson/test/disabled/test_joint_limit.py
new file mode 100644
index 000000000..dc70d0e75
--- /dev/null
+++ b/igibson/test/disabled/test_joint_limit.py
@@ -0,0 +1,117 @@
+import json
+import os
+
+import numpy as np
+import pybullet as p
+from PIL import Image
+from transforms3d.euler import euler2quat
+
+from igibson.external.pybullet_tools.utils import get_center_extent, stable_z_on_aabb
+from igibson.objects.articulated_object import ArticulatedObject
+from igibson.simulator import Simulator
+from igibson.utils.utils import quatToXYZW
+
+
+def main():
+ step_per_sec = 100
+ num_directions = 12
+ obj_count = 0
+ root_dir = "/cvgl2/u/chengshu/ig_dataset_v5/objects"
+
+ s = Simulator(mode="headless", image_width=512, image_height=512, physics_timestep=1 / float(step_per_sec))
+ p.setGravity(0.0, 0.0, 0.0)
+
+ for obj_class_dir in sorted(os.listdir(root_dir)):
+ obj_class_dir = os.path.join(root_dir, obj_class_dir)
+ for obj_inst_dir in os.listdir(obj_class_dir):
+ obj_inst_name = obj_inst_dir
+ urdf_path = obj_inst_name + ".urdf"
+ obj_inst_dir = os.path.join(obj_class_dir, obj_inst_dir)
+ urdf_path = os.path.join(obj_inst_dir, urdf_path)
+
+ obj = ArticulatedObject(urdf_path)
+ s.import_object(obj)
+
+ with open(os.path.join(obj_inst_dir, "misc/bbox.json"), "r") as bbox_file:
+ bbox_data = json.load(bbox_file)
+ bbox_max = np.array(bbox_data["max"])
+ bbox_min = np.array(bbox_data["min"])
+ offset = -(bbox_max + bbox_min) / 2.0
+
+ z = stable_z_on_aabb(obj.body_id, [[0, 0, 0], [0, 0, 0]])
+
+ obj.set_position([offset[0], offset[1], z])
+ _, extent = get_center_extent(obj.body_id)
+
+ max_half_extent = max(extent) / 2.0
+ px = max_half_extent * 3.0
+ py = 0.0
+ pz = extent[2] / 2.0
+ camera_pose = np.array([px, py, pz])
+
+ s.renderer.set_camera(camera_pose, [0, 0, pz], [0, 0, 1])
+
+ num_joints = p.getNumJoints(obj.body_id)
+ if num_joints == 0:
+ s.reload()
+ continue
+
+ # collect joint low/high limit
+ joint_low = []
+ joint_high = []
+ for j in range(num_joints):
+ j_low, j_high = p.getJointInfo(obj.body_id, j)[8:10]
+ joint_low.append(j_low)
+ joint_high.append(j_high)
+
+ # set joints to their lowest limits
+ for j, j_low in zip(range(num_joints), joint_low):
+ p.resetJointState(obj.body_id, j, targetValue=j_low, targetVelocity=0.0)
+ s.sync()
+
+ # render the images
+ joint_low_imgs = []
+ for i in range(num_directions):
+ yaw = np.pi * 2.0 / num_directions * i
+ obj.set_orientation(quatToXYZW(euler2quat(0.0, 0.0, yaw), "wxyz"))
+ s.sync()
+ rgb, three_d = s.renderer.render(modes=("rgb", "3d"))
+ depth = -three_d[:, :, 2]
+ rgb[depth == 0] = 1.0
+ joint_low_imgs.append(Image.fromarray((rgb[:, :, :3] * 255).astype(np.uint8)))
+
+ # set joints to their highest limits
+ for j, j_high in zip(range(num_joints), joint_high):
+ p.resetJointState(obj.body_id, j, targetValue=j_high, targetVelocity=0.0)
+ s.sync()
+
+ # render the images
+ joint_high_imgs = []
+ for i in range(num_directions):
+ yaw = np.pi * 2.0 / num_directions * i
+ obj.set_orientation(quatToXYZW(euler2quat(0.0, 0.0, yaw), "wxyz"))
+ s.sync()
+ rgb, three_d = s.renderer.render(modes=("rgb", "3d"))
+ depth = -three_d[:, :, 2]
+ rgb[depth == 0] = 1.0
+ joint_high_imgs.append(Image.fromarray((rgb[:, :, :3] * 255).astype(np.uint8)))
+
+ # concatenate the images
+ imgs = []
+ for im1, im2 in zip(joint_low_imgs, joint_high_imgs):
+ dst = Image.new("RGB", (im1.width + im2.width, im1.height))
+ dst.paste(im1, (0, 0))
+ dst.paste(im2, (im1.width, 0))
+ imgs.append(dst)
+ gif_path = "{}/visualizations/{}_joint_limit.gif".format(obj_inst_dir, obj_inst_name)
+
+ # save the gif
+ imgs[0].save(gif_path, save_all=True, append_images=imgs[1:], optimize=True, duration=200, loop=0)
+
+ s.reload()
+ obj_count += 1
+ print(obj_count, gif_path)
+
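For reference, the joint-limit bookkeeping in main() above relies on indices 8 and 9 of p.getJointInfo() being the lower and upper joint limits; a minimal sketch, assuming an already-loaded pybullet body:

import pybullet as p


def snap_joints_to_lower_limits(body_id):
    # body_id is assumed to be an already-loaded pybullet body
    joint_low, joint_high = [], []
    for j in range(p.getNumJoints(body_id)):
        j_low, j_high = p.getJointInfo(body_id, j)[8:10]  # lower/upper joint limits
        joint_low.append(j_low)
        joint_high.append(j_high)
        # resetJointState teleports the joint without simulating the motion
        p.resetJointState(body_id, j, targetValue=j_low, targetVelocity=0.0)
    return joint_low, joint_high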
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/test/disabled/test_physics_realism.py b/igibson/test/disabled/test_physics_realism.py
new file mode 100644
index 000000000..e168496a1
--- /dev/null
+++ b/igibson/test/disabled/test_physics_realism.py
@@ -0,0 +1,437 @@
+import json
+import os
+import xml.etree.ElementTree as ET
+
+import numpy as np
+import pybullet as p
+import trimesh
+from IPython import embed
+from PIL import Image
+
+import igibson
+from igibson.external.pybullet_tools.utils import get_center_extent, stable_z_on_aabb
+from igibson.objects.articulated_object import ArticulatedObject
+from igibson.objects.visual_marker import VisualMarker
+from igibson.scenes.empty_scene import EmptyScene
+from igibson.simulator import Simulator
+from igibson.utils.urdf_utils import round_up, save_urdfs_without_floating_joints
+from igibson.utils.utils import rotate_vector_3d
+
+SELECTED_CLASSES = ["window"]
+SELECTED_INSTANCES = "103070"
+
+
+def save_scaled_urdf(filename, avg_size_mass, obj_class):
+ model_path = os.path.dirname(filename)
+ meta_json = os.path.join(model_path, "misc/metadata.json")
+
+ if os.path.isfile(meta_json):
+ with open(meta_json, "r") as f:
+ meta_data = json.load(f)
+ bbox_size = np.array(meta_data["bbox_size"])
+ base_link_offset = np.array(meta_data["base_link_offset"])
+ else:
+ bbox_json = os.path.join(model_path, "misc/bbox.json")
+ with open(bbox_json, "r") as bbox_file:
+ bbox_data = json.load(bbox_file)
+ bbox_max = np.array(bbox_data["max"])
+ bbox_min = np.array(bbox_data["min"])
+ bbox_size = bbox_max - bbox_min
+ base_link_offset = (bbox_min + bbox_max) / 2.0
+
+ bounding_box = np.array(avg_size_mass["size"])
+ scale = bounding_box / bbox_size
+ # scale = np.array([1.0, 1.0, 1.0])
+
+ object_tree = ET.parse(filename)
+ # We need to scale 1) the meshes, 2) the position of meshes, 3) the position of joints, 4) the orientation axis of joints
+ # The problem is that those quantities are given w.r.t. their parent link frame, which can be rotated w.r.t. the frame the scale was given in
+ # Solution: parse the kin tree joint by joint, extract the rotation, rotate the scale, apply rotated scale to 1, 2, 3, 4 in the child link frame
+
+ # First, define the scale in each link reference frame
+ # and apply it to the joint values
+ scales_in_lf = {}
+ scales_in_lf["base_link"] = scale
+ all_processed = False
+ while not all_processed:
+ all_processed = True
+ for joint in object_tree.iter("joint"):
+ parent_link_name = joint.find("parent").attrib["link"]
+ child_link_name = joint.find("child").attrib["link"]
+ if parent_link_name in scales_in_lf and child_link_name not in scales_in_lf:
+ scale_in_parent_lf = scales_in_lf[parent_link_name]
+ # The location of the joint frame is scaled using the scale in the parent frame
+ for origin in joint.iter("origin"):
+ current_origin_xyz = np.array([float(val) for val in origin.attrib["xyz"].split(" ")])
+ new_origin_xyz = np.multiply(current_origin_xyz, scale_in_parent_lf)
+ new_origin_xyz = np.array([round_up(val, 4) for val in new_origin_xyz])
+ origin.attrib["xyz"] = " ".join(map(str, new_origin_xyz))
+
+ # scale the prismatic joint
+ if joint.attrib["type"] == "prismatic":
+ limits = joint.findall("limit")
+ assert len(limits) == 1
+ limit = limits[0]
+ axes = joint.findall("axis")
+ assert len(axes) == 1
+ axis = axes[0]
+ axis_np = np.array([float(elem) for elem in axis.attrib["xyz"].split()])
+ major_axis = np.argmax(np.abs(axis_np))
+ # assume the prismatic joint is roughly axis-aligned
+ limit.attrib["upper"] = str(float(limit.attrib["upper"]) * scale_in_parent_lf[major_axis])
+ limit.attrib["lower"] = str(float(limit.attrib["lower"]) * scale_in_parent_lf[major_axis])
+
+ # Get the rotation of the joint frame and apply it to the scale
+ if "rpy" in joint.keys():
+ joint_frame_rot = np.array([float(val) for val in joint.attrib["rpy"].split(" ")])
+ # Rotate the scale
+ scale_in_child_lf = rotate_vector_3d(scale_in_parent_lf, *joint_frame_rot, cck=True)
+ scale_in_child_lf = np.absolute(scale_in_child_lf)
+ else:
+ scale_in_child_lf = scale_in_parent_lf
+
+ # print("Adding: ", joint.find("child").attrib["link"])
+
+ scales_in_lf[joint.find("child").attrib["link"]] = scale_in_child_lf
+
+ # The axis of the joint is defined in the joint frame, we scale it after applying the rotation
+ for axis in joint.iter("axis"):
+ current_axis_xyz = np.array([float(val) for val in axis.attrib["xyz"].split(" ")])
+ new_axis_xyz = np.multiply(current_axis_xyz, scale_in_child_lf)
+ new_axis_xyz /= np.linalg.norm(new_axis_xyz)
+ new_axis_xyz = np.array([round_up(val, 4) for val in new_axis_xyz])
+ axis.attrib["xyz"] = " ".join(map(str, new_axis_xyz))
+
+ # Iterate again the for loop since we added new elements to the dictionary
+ all_processed = False
+
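The scale propagation described in the comments above (rotate the parent-frame scale into each child link frame) can be illustrated with a small sketch; the scale, the joint rpy, and the direction of the rotation (R vs. R.T, which depends on rotate_vector_3d's convention) are assumptions for illustration:

import numpy as np
from transforms3d.euler import euler2mat

scale_in_parent_lf = np.array([1.2, 0.8, 1.0])  # hypothetical per-axis scale
joint_rpy = (0.0, 0.0, np.pi / 2)  # hypothetical joint frame rotation

R = euler2mat(*joint_rpy)  # rotation of the child frame relative to the parent
# R vs. R.T depends on the convention used by rotate_vector_3d; taking the
# absolute value makes the scale usable regardless of axis sign flips
scale_in_child_lf = np.abs(R.T @ scale_in_parent_lf)
print(scale_in_child_lf)  # [0.8, 1.2, 1.0] for a 90-degree yaw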
+ all_links = object_tree.findall("link")
+ all_links_trimesh = []
+ total_volume = 0.0
+ for link in all_links:
+ meshes = link.findall("collision/geometry/mesh")
+ if len(meshes) == 0:
+ all_links_trimesh.append(None)
+ continue
+ assert len(meshes) == 1, (filename, link.attrib["name"])
+ collision_mesh_path = os.path.join(model_path, meshes[0].attrib["filename"])
+ trimesh_obj = trimesh.load(file_obj=collision_mesh_path)
+ all_links_trimesh.append(trimesh_obj)
+ volume = trimesh_obj.volume
+ if link.attrib["name"] == "base_link":
+ if obj_class in ["lamp"]:
+ volume *= 10.0
+ total_volume += volume
+
+ # Scale the mass based on bounding box size
+ # TODO: how to scale moment of inertia?
+ total_mass = avg_size_mass["density"] * bounding_box[0] * bounding_box[1] * bounding_box[2]
+ print("total_mass", total_mass)
+
+ density = total_mass / total_volume
+ print("avg density", density)
+ for trimesh_obj in all_links_trimesh:
+ if trimesh_obj is not None:
+ trimesh_obj.density = density
+
+ assert len(all_links) == len(all_links_trimesh)
+
+ # Now iterate over all links and scale the meshes and positions
+ for link, link_trimesh in zip(all_links, all_links_trimesh):
+ inertials = link.findall("inertial")
+ if len(inertials) == 0:
+ inertial = ET.SubElement(link, "inertial")
+ else:
+ assert len(inertials) == 1
+ inertial = inertials[0]
+
+ masses = inertial.findall("mass")
+ if len(masses) == 0:
+ mass = ET.SubElement(inertial, "mass")
+ else:
+ assert len(masses) == 1
+ mass = masses[0]
+
+ inertias = inertial.findall("inertia")
+ if len(inertias) == 0:
+ inertia = ET.SubElement(inertial, "inertia")
+ else:
+ assert len(inertias) == 1
+ inertia = inertias[0]
+
+ origins = inertial.findall("origin")
+ if len(origins) == 0:
+ origin = ET.SubElement(inertial, "origin")
+ else:
+ assert len(origins) == 1
+ origin = origins[0]
+
+ if link_trimesh is not None:
+ if link.attrib["name"] == "base_link":
+ if obj_class in ["lamp"]:
+ link_trimesh.density *= 10.0
+
+ if link_trimesh.is_watertight:
+ center = link_trimesh.center_mass
+ else:
+ center = link_trimesh.centroid
+
+ # The inertial frame origin will be scaled down below.
+ # Here, it has the value BEFORE scaling
+ origin.attrib["xyz"] = " ".join(map(str, center))
+ origin.attrib["rpy"] = " ".join(map(str, [0.0, 0.0, 0.0]))
+
+ mass.attrib["value"] = str(round_up(link_trimesh.mass, 4))
+ moment_of_inertia = link_trimesh.moment_inertia
+ inertia.attrib["ixx"] = str(moment_of_inertia[0][0])
+ inertia.attrib["ixy"] = str(moment_of_inertia[0][1])
+ inertia.attrib["ixz"] = str(moment_of_inertia[0][2])
+ inertia.attrib["iyy"] = str(moment_of_inertia[1][1])
+ inertia.attrib["iyz"] = str(moment_of_inertia[1][2])
+ inertia.attrib["izz"] = str(moment_of_inertia[2][2])
+ else:
+ # empty link that does not have any mesh
+ origin.attrib["xyz"] = " ".join(map(str, [0.0, 0.0, 0.0]))
+ origin.attrib["rpy"] = " ".join(map(str, [0.0, 0.0, 0.0]))
+ mass.attrib["value"] = str(0.0)
+ inertia.attrib["ixx"] = str(0.0)
+ inertia.attrib["ixy"] = str(0.0)
+ inertia.attrib["ixz"] = str(0.0)
+ inertia.attrib["iyy"] = str(0.0)
+ inertia.attrib["iyz"] = str(0.0)
+ inertia.attrib["izz"] = str(0.0)
+
+ scale_in_lf = scales_in_lf[link.attrib["name"]]
+ # Apply the scale to all mesh elements within the link (original scale and origin)
+ for mesh in link.iter("mesh"):
+ if "scale" in mesh.attrib:
+ mesh_scale = np.array([float(val) for val in mesh.attrib["scale"].split(" ")])
+ new_scale = np.multiply(mesh_scale, scale_in_lf)
+ new_scale = np.array([round_up(val, 4) for val in new_scale])
+ mesh.attrib["scale"] = " ".join(map(str, new_scale))
+ else:
+ new_scale = np.array([round_up(val, 4) for val in scale_in_lf])
+ mesh.set("scale", " ".join(map(str, new_scale)))
+ for origin in link.iter("origin"):
+ origin_xyz = np.array([float(val) for val in origin.attrib["xyz"].split(" ")])
+ new_origin_xyz = np.multiply(origin_xyz, scale_in_lf)
+ new_origin_xyz = np.array([round_up(val, 4) for val in new_origin_xyz])
+ origin.attrib["xyz"] = " ".join(map(str, new_origin_xyz))
+
+ new_filename = filename[:-5] + "_avg_size"
+ urdfs_no_floating = save_urdfs_without_floating_joints(object_tree, new_filename, False)
+
+ # If the object is broken down into multiple URDF files, we only want to
+ # visualize the main URDF (e.g. visualize the bed and ignore the pillows)
+ # The main URDF is the one with the highest mass.
+ max_mass = 0.0
+ main_urdf_file = None
+ for key in urdfs_no_floating:
+ object_tree = ET.parse(urdfs_no_floating[key][0])
+ cur_mass = 0.0
+ for mass in object_tree.iter("mass"):
+ cur_mass += float(mass.attrib["value"])
+ if cur_mass > max_mass:
+ max_mass = cur_mass
+ main_urdf_file = urdfs_no_floating[key][0]
+
+ assert main_urdf_file is not None
+
+ # Finally, we need to know where the base_link origin is w.r.t. the bounding box center. That allows us to place the model
+ # correctly since the joint transformations given in the scene urdf are for the bounding box center
+ # Coordinates of the bounding box center in the base_link frame
+ # We scale the location. We will subtract this from the joint location
+ scaled_bbxc_in_blf = -scale * base_link_offset
+
+ return main_urdf_file, scaled_bbxc_in_blf
+
+
+def get_avg_size_mass():
+
+ avg_obj_dims_json = os.path.join(igibson.ig_dataset_path, "objects/avg_category_specs.json")
+ with open(avg_obj_dims_json) as f:
+ avg_obj_dims = json.load(f)
+ return avg_obj_dims
+
+ # return {
+ # 'lamp': {'size': [0.3, 0.4, 0.6], 'mass': 4, 'density': 4 / (0.3 * 0.3 * 1.0)},
+ # 'chair': {'size': [0.5, 0.5, 0.85], 'mass': 6, 'density': 6 / (0.5 * 0.5 * 0.85)},
+ # 'bed': {'size': [1.7, 2.2, 0.63], 'mass': 80, 'density': 80 / (1.7 * 2.2 * 0.63)},
+ # 'cushion': {'size': [0.4, 0.4, 0.25], 'mass': 1.3, 'density': 1.3 / (0.4 * 0.4 * 0.25)},
+ # 'piano': {'size': [1.16, 0.415, 1.0], 'mass': 225.0, 'density': 225.0 / (0.415 * 1.16 * 1.0)}
+ # }
+
+
+def save_scale_urdfs():
+ main_urdf_file_and_offset = {}
+ avg_size_mass = get_avg_size_mass()
+ # all_materials = set()
+ root_dir = "/cvgl2/u/chengshu/ig_dataset_v5/objects"
+ for obj_class_dir in os.listdir(root_dir):
+ obj_class = obj_class_dir
+ # if obj_class not in SELECTED_CLASSES:
+ # continue
+ obj_class_dir = os.path.join(root_dir, obj_class_dir)
+ for obj_inst_dir in os.listdir(obj_class_dir):
+ obj_inst_name = obj_inst_dir
+ if obj_inst_name not in SELECTED_INSTANCES:
+ continue
+ urdf_path = obj_inst_name + ".urdf"
+ obj_inst_dir = os.path.join(obj_class_dir, obj_inst_dir)
+ urdf_path = os.path.join(obj_inst_dir, urdf_path)
+ main_urdf_file, scaled_bbxc_in_blf = save_scaled_urdf(urdf_path, avg_size_mass[obj_class], obj_class)
+ main_urdf_file_and_offset[obj_inst_dir] = (main_urdf_file, scaled_bbxc_in_blf)
+ print(main_urdf_file)
+
+ return main_urdf_file_and_offset
+
+
+def render_physics_gifs(main_urdf_file_and_offset):
+ step_per_sec = 100
+ s = Simulator(mode="headless", image_width=512, image_height=512, physics_timestep=1 / float(step_per_sec))
+
+ root_dir = "/cvgl2/u/chengshu/ig_dataset_v5/objects"
+ obj_count = 0
+ for i, obj_class_dir in enumerate(sorted(os.listdir(root_dir))):
+ obj_class = obj_class_dir
+ # if obj_class not in SELECTED_CLASSES:
+ # continue
+ obj_class_dir = os.path.join(root_dir, obj_class_dir)
+ for obj_inst_dir in os.listdir(obj_class_dir):
+ # if obj_inst_dir != '14402':
+ # continue
+
+ imgs = []
+ scene = EmptyScene()
+ s.import_scene(scene, render_floor_plane=True)
+ obj_inst_name = obj_inst_dir
+ # urdf_path = obj_inst_name + '.urdf'
+ obj_inst_dir = os.path.join(obj_class_dir, obj_inst_dir)
+ urdf_path, offset = main_urdf_file_and_offset[obj_inst_dir]
+ # urdf_path = os.path.join(obj_inst_dir, urdf_path)
+ # print('urdf_path', urdf_path)
+
+ obj = ArticulatedObject(urdf_path)
+ s.import_object(obj)
+
+ push_visual_marker = VisualMarker(radius=0.1)
+ s.import_object(push_visual_marker)
+ push_visual_marker.set_position([100, 100, 0.0])
+ z = stable_z_on_aabb(obj.body_id, [[0, 0, 0], [0, 0, 0]])
+
+ # offset is the translation from the bounding box center to the base link frame origin.
+ # We need to add another offset: the translation from the base link frame origin to the inertial frame origin,
+ # because p.resetBasePositionAndOrientation() sets the inertial frame origin instead of the base link frame origin.
+ # Assuming the bounding box center is at (0, 0, z) where z is half of the extent in z-direction
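+ # Net effect: after this reset the bounding box center should sit at (0, 0, z), i.e. centered on the
+ # xy plane with the bottom of the box touching the ground (this is what the check below verifies).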
+ base_link_to_inertial = p.getDynamicsInfo(obj.body_id, -1)[3]
+ obj.set_position([offset[0] + base_link_to_inertial[0], offset[1] + base_link_to_inertial[1], z])
+
+ center, extent = get_center_extent(obj.body_id)
+ # the bounding box center should be at (0, 0) on the xy plane and the bottom of the bounding box should touch the ground
+ if not (np.linalg.norm(center[:2]) < 1e-3 and np.abs(center[2] - extent[2] / 2.0) < 1e-3):
+ print("-" * 50)
+ print(
+ "WARNING:",
+ urdf_path,
+ "xy error",
+ np.linalg.norm(center[:2]),
+ "z error",
+ np.abs(center[2] - extent[2] / 2.0),
+ )
+
+ height = extent[2]
+
+ max_half_extent = max(extent[0], extent[1]) / 2.0
+ px = max_half_extent * 2
+ py = max_half_extent * 2
+ pz = extent[2] * 1.2
+
+ camera_pose = np.array([px, py, pz])
+ # camera_pose = np.array([0.01, 0.01, 3])
+
+ s.renderer.set_camera(camera_pose, [0, 0, 0], [0, 0, 1])
+ # drop 1 second
+ for _ in range(step_per_sec):
+ s.step()
+ frame = s.renderer.render(modes=("rgb"))
+ imgs.append(Image.fromarray((frame[0][:, :, :3] * 255).astype(np.uint8)))
+
+ ray_start = [max_half_extent * 1.5, max_half_extent * 1.5, 0]
+ ray_end = [-100.0, -100.0, 0]
+ unit_force = np.array([-1.0, -1.0, 0.0])
+ unit_force /= np.linalg.norm(unit_force)
+ force_mag = 100.0
+
+ ray_zs = [height * 0.5]
+ valid_ray_z = False
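+ # Find a ray height that actually hits the object: start at half the object height and, if the ray
+ # misses, subdivide the height into progressively finer fractions (1/4, 1/8, ...) and retry.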
+ for trial in range(5):
+ for ray_z in ray_zs:
+ ray_start[2] = ray_z
+ ray_end[2] = ray_z
+ res = p.rayTest(ray_start, ray_end)
+ if res[0][0] == obj.body_id:
+ valid_ray_z = True
+ break
+ if valid_ray_z:
+ break
+ increment = 1 / (2 ** (trial + 1))
+ ray_zs = np.arange(increment / 2.0, 1.0, increment) * height
+
+ # push 4 seconds
+ for i in range(step_per_sec * 4):
+ res = p.rayTest(ray_start, ray_end)
+ object_id, link_id, _, hit_pos, hit_normal = res[0]
+ if object_id != obj.body_id:
+ break
+ push_visual_marker.set_position(hit_pos)
+ p.applyExternalForce(object_id, link_id, unit_force * force_mag, hit_pos, p.WORLD_FRAME)
+ s.step()
+ # print(hit_pos)
+ frame = s.renderer.render(modes=("rgb"))
+ rgb = frame[0][:, :, :3]
+ # add red border
+ border_width = 10
+ border_color = np.array([1.0, 0.0, 0.0])
+ rgb[:border_width, :, :] = border_color
+ rgb[-border_width:, :, :] = border_color
+ rgb[:, :border_width, :] = border_color
+ rgb[:, -border_width:, :] = border_color
+
+ imgs.append(Image.fromarray((rgb * 255).astype(np.uint8)))
+
+ gif_path = "{}/visualizations/{}_cm_physics_v3.gif".format(obj_inst_dir, obj_inst_name)
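+ # Keep every 4th frame: frames are captured once per 100 Hz step, so with the 40 ms GIF frame
+ # duration below the GIF plays back at roughly real-time speed (~25 fps).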
+ imgs = imgs[::4]
+ imgs[0].save(gif_path, save_all=True, append_images=imgs[1:], optimize=True, duration=40, loop=0)
+ obj_count += 1
+ # print(obj_count, gif_path, len(imgs), valid_ray_z, ray_z)
+ print(obj_count, gif_path)
+ s.reload()
+
+
+# for testing purposes
+def debug_renderer_scaling():
+ s = Simulator(mode="gui", image_width=512, image_height=512, physics_timestep=1 / float(100))
+ scene = EmptyScene()
+ s.import_scene(scene, render_floor_plane=True)
+ urdf_path = "/cvgl2/u/chengshu/ig_dataset_v5/objects/window/103070/103070_avg_size_0.urdf"
+
+ obj = ArticulatedObject(urdf_path)
+ s.import_object(obj)
+ obj.set_position([0, 0, 0])
+ embed()
+
+ z = stable_z_on_aabb(obj.body_id, [[0, 0, 0], [0, 0, 0]])
+ obj.set_position([0, 0, z])
+ embed()
+ for _ in range(100000000000):
+ s.step()
+
+
+def main():
+ # debug_renderer_scaling()
+ main_urdf_file_and_offset = save_scale_urdfs()
+ render_physics_gifs(main_urdf_file_and_offset)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/igibson/test/disabled/test_viewer.py b/igibson/test/disabled/test_viewer.py
new file mode 100644
index 000000000..d495f944b
--- /dev/null
+++ b/igibson/test/disabled/test_viewer.py
@@ -0,0 +1,7 @@
+from igibson.render.viewer import Viewer
+
+
+def test_viewer():
+ viewer = Viewer()
+ for i in range(100):
+ viewer.update()
diff --git a/tests/test.yaml b/igibson/test/test.yaml
similarity index 84%
rename from tests/test.yaml
rename to igibson/test/test.yaml
index 58811c5d3..939279c93 100644
--- a/tests/test.yaml
+++ b/igibson/test/test.yaml
@@ -13,17 +13,9 @@ texture_randomization_freq: null
object_randomization_freq: null
# robot
-robot:
- name: Turtlebot
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
+robot: Turtlebot
+is_discrete: false
+velocity: 1.0
# task
task: point_nav_random
@@ -78,4 +70,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
\ No newline at end of file
diff --git a/tests/test_binding.py b/igibson/test/test_binding.py
similarity index 54%
rename from tests/test_binding.py
rename to igibson/test/test_binding.py
index 6f968bb2a..bc7eeee71 100644
--- a/tests/test_binding.py
+++ b/igibson/test/test_binding.py
@@ -1,24 +1,20 @@
import platform
from igibson.render.mesh_renderer.get_available_devices import get_available_devices
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
def test_device():
- assert len(get_available_devices()[0]) > 0
+ assert len(get_available_devices()) > 0
def test_binding():
if platform.system() == "Darwin":
from igibson.render.mesh_renderer import GLFWRendererContext
- setting = MeshRendererSettings()
- r = GLFWRendererContext.GLFWRendererContext(
- 256, 256, setting.glfw_gl_version[0], setting.glfw_gl_version[1], False, False
- )
+ r = GLFWRendererContext.GLFWRendererContext(256, 256)
else:
from igibson.render.mesh_renderer import EGLRendererContext
- r = EGLRendererContext.EGLRendererContext(256, 256, get_available_devices()[0][0])
+ r = EGLRendererContext.EGLRendererContext(256, 256, get_available_devices()[0])
r.init()
r.release()
diff --git a/igibson/test/test_determinism_against_behavior.py b/igibson/test/test_determinism_against_behavior.py
new file mode 100644
index 000000000..8e9931baa
--- /dev/null
+++ b/igibson/test/test_determinism_against_behavior.py
@@ -0,0 +1,19 @@
+import os
+import tempfile
+
+import bddl
+
+import igibson
+from igibson.examples.behavior import behavior_demo_replay
+
+bddl.set_backend("iGibson")
+
+
+def test_determinism_with_existing_vr_demo():
+ DEMO_FILE = os.path.join(igibson.ig_dataset_path, "tests", "cleaning_windows_0_Rs_int_2021-05-23_23-11-46.hdf5")
+
+ with tempfile.TemporaryDirectory() as directory:
+ replay_file = os.path.join(directory, "replay.hdf5")
+
+ # Replay the canonical demo.
+ behavior_demo_replay.safe_replay_demo(DEMO_FILE, out_log_path=replay_file, mode="headless")
diff --git a/igibson/test/test_determinism_against_same_version.py b/igibson/test/test_determinism_against_same_version.py
new file mode 100644
index 000000000..5a0442bd2
--- /dev/null
+++ b/igibson/test/test_determinism_against_same_version.py
@@ -0,0 +1,23 @@
+import os
+import tempfile
+
+import bddl
+
+from igibson.examples.behavior import behavior_demo_collection, behavior_demo_replay
+
+bddl.set_backend("iGibson")
+
+
+def test_determinism_with_new_demo():
+ # First record a random demo.
+ with tempfile.TemporaryDirectory() as directory:
+ demo_file = os.path.join(directory, "demo.hdf5")
+ print("Saving demo.")
+ behavior_demo_collection.collect_demo(
+ "cleaning_out_drawers", 0, "Benevolence_1_int", vr_log_path=demo_file, no_vr=True, max_steps=400
+ )
+
+ # Then replay the random demo.
+ print("Replaying demo.")
+ replay_file = os.path.join(directory, "replay.hdf5")
+ behavior_demo_replay.safe_replay_demo(demo_file, out_log_path=replay_file, mode="headless")
diff --git a/igibson/test/test_glfw_renderer.py b/igibson/test/test_glfw_renderer.py
new file mode 100644
index 000000000..4d0a5860d
--- /dev/null
+++ b/igibson/test/test_glfw_renderer.py
@@ -0,0 +1,24 @@
+import cv2
+import numpy as np
+
+from igibson.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
+
+renderer = MeshRenderer(width=512, height=512)
+renderer.load_object(
+ "C:\\Users\\shen\\Desktop\\GibsonVRStuff\\vr_branch\\gibsonv2\\igibson\\assets\\datasets\\Ohoopee\\Ohoopee_mesh_texture.obj"
+)
+renderer.add_instance(0)
+
+print(renderer.visual_objects, renderer.instances)
+print(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx)
+camera_pose = np.array([0, 0, 1.2])
+view_direction = np.array([1, 0, 0])
+renderer.set_camera(camera_pose, camera_pose + view_direction, [1, 0, 0])
+renderer.set_fov(90)
+
+while True:
+ frame = renderer.render(modes=("rgb"))
+ cv2.imshow("test", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
+ q = cv2.waitKey(1)
+
+renderer.release()
diff --git a/tests/test_house.yaml b/igibson/test/test_house.yaml
similarity index 83%
rename from tests/test_house.yaml
rename to igibson/test/test_house.yaml
index 1eb1af305..fd7ed5288 100644
--- a/tests/test_house.yaml
+++ b/igibson/test/test_house.yaml
@@ -8,17 +8,9 @@ trav_map_resolution: 0.1
trav_map_erosion: 2
# robot
-robot:
- name: Turtlebot
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
+robot: Turtlebot
+is_discrete: false
+velocity: 1.0
# task
task: point_nav_random
@@ -73,5 +65,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/tests/test_house_occupancy_grid.yaml b/igibson/test/test_house_occupancy_grid.yaml
similarity index 83%
rename from tests/test_house_occupancy_grid.yaml
rename to igibson/test/test_house_occupancy_grid.yaml
index 9c1dae446..95792ca36 100644
--- a/tests/test_house_occupancy_grid.yaml
+++ b/igibson/test/test_house_occupancy_grid.yaml
@@ -8,17 +8,9 @@ trav_map_resolution: 0.1
trav_map_erosion: 2
# robot
-robot:
- name: Turtlebot
- action_type: continuous
- action_normalize: true
- base_name: null
- scale: 1.0
- self_collision: true
- rendering_params: null
- controller_config:
- base:
- name: DifferentialDriveController
+robot: Turtlebot
+is_discrete: false
+velocity: 1.0
# task
task: point_nav_random
@@ -73,5 +65,5 @@ depth_noise_rate: 0.0
scan_noise_rate: 0.0
# visual objects
-visible_target: true
-
+visual_object_at_initial_target_pos: true
+target_visual_object_visible_to_agent: false
diff --git a/tests/test_igibson_env.py b/igibson/test/test_igibson_env.py
similarity index 84%
rename from tests/test_igibson_env.py
rename to igibson/test/test_igibson_env.py
index 613ae0d81..2eb231337 100644
--- a/tests/test_igibson_env.py
+++ b/igibson/test/test_igibson_env.py
@@ -3,14 +3,13 @@
import igibson
from igibson.envs.igibson_env import iGibsonEnv
-from igibson.tasks.task_base import BaseTask
from igibson.utils.assets_utils import download_assets, download_demo_data
def test_env():
download_assets()
download_demo_data()
- config_filename = os.path.join(os.path.dirname(__file__), "test_house.yaml")
+ config_filename = os.path.join(igibson.root_path, "test", "test_house.yaml")
env = iGibsonEnv(config_file=config_filename, mode="headless")
try:
for j in range(2):
@@ -30,7 +29,7 @@ def test_env():
def test_env_reload():
download_assets()
download_demo_data()
- config_filename = os.path.join(os.path.dirname(__file__), "test_house.yaml")
+ config_filename = os.path.join(igibson.root_path, "test", "test_house.yaml")
env = iGibsonEnv(config_file=config_filename, mode="headless")
try:
for i in range(3):
@@ -51,12 +50,11 @@ def test_env_reload():
def test_env_reset():
download_assets()
download_demo_data()
- config_filename = os.path.join(os.path.dirname(__file__), "test_house.yaml")
+ config_filename = os.path.join(igibson.root_path, "test", "test_house.yaml")
env = iGibsonEnv(config_file=config_filename, mode="headless")
- class TestTask(BaseTask):
- def __init__(self, env):
- super(TestTask, self).__init__(env)
+ class DummyTask(object):
+ def __init__(self):
self.reset_scene_called = False
self.reset_agent_called = False
self.get_task_obs_called = False
@@ -70,7 +68,7 @@ def reset_scene(self, env):
def reset_agent(self, env):
self.reset_agent_called = True
- env.task = TestTask(env)
+ env.task = DummyTask()
env.reset()
assert env.task.reset_scene_called
assert env.task.reset_agent_called
diff --git a/tests/test_igsdf_scene_importing.py b/igibson/test/test_igsdf_scene_importing.py
similarity index 96%
rename from tests/test_igsdf_scene_importing.py
rename to igibson/test/test_igsdf_scene_importing.py
index 12c3906eb..22b7aecdd 100644
--- a/tests/test_igsdf_scene_importing.py
+++ b/igibson/test/test_igsdf_scene_importing.py
@@ -9,7 +9,7 @@
def test_import_igsdf():
scene = InteractiveIndoorScene("Rs_int", texture_randomization=False, object_randomization=False)
s = Simulator(mode="headless", image_width=512, image_height=512, device_idx=0)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
s.renderer.use_pbr(use_pbr=True, use_pbr_mapping=True)
for i in range(10):
diff --git a/tests/test_motion_planning.py b/igibson/test/test_motion_planning.py
similarity index 86%
rename from tests/test_motion_planning.py
rename to igibson/test/test_motion_planning.py
index 0fdffaccc..83d76a54d 100644
--- a/tests/test_motion_planning.py
+++ b/igibson/test/test_motion_planning.py
@@ -13,7 +13,7 @@ def test_occupancy_grid():
print("Test env")
download_assets()
download_demo_data()
- config_filename = os.path.join(os.path.dirname(__file__), "test_house_occupancy_grid.yaml")
+ config_filename = os.path.join(igibson.root_path, "test", "test_house_occupancy_grid.yaml")
nav_env = iGibsonEnv(config_file=config_filename, mode="headless")
nav_env.reset()
@@ -26,7 +26,7 @@ def test_occupancy_grid():
assert np.sum(ts[0]["occupancy_grid"] == 1) > 0
plt.imshow(ts[0]["occupancy_grid"][:, :, 0])
plt.colorbar()
- plt.savefig(os.path.join(os.path.dirname(__file__), "occupancy_grid.png"))
+ plt.savefig("occupancy_grid.png")
nav_env.clean()
@@ -34,11 +34,11 @@ def test_base_planning():
print("Test env")
download_assets()
download_demo_data()
- config_filename = os.path.join(os.path.dirname(__file__), "test_house_occupancy_grid.yaml")
+ config_filename = os.path.join(igibson.root_path, "test", "test_house_occupancy_grid.yaml")
nav_env = iGibsonEnv(config_file=config_filename, mode="headless")
motion_planner = MotionPlanningWrapper(nav_env)
- nav_env.reset()
+ state = nav_env.reset()
nav_env.robots[0].set_position_orientation([0, 0, 0], [0, 0, 0, 1])
nav_env.simulator.step()
plan = None
diff --git a/tests/test_object.py b/igibson/test/test_object.py
similarity index 85%
rename from tests/test_object.py
rename to igibson/test/test_object.py
index e7a752d05..995195f2f 100644
--- a/tests/test_object.py
+++ b/igibson/test/test_object.py
@@ -1,15 +1,14 @@
import os
-import pybullet as p
-
import igibson
from igibson.objects.articulated_object import ArticulatedObject, RBOObject
from igibson.objects.cube import Cube
from igibson.objects.ycb_object import YCBObject
-from igibson.robots.turtlebot import Turtlebot
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.scenes.stadium_scene import StadiumScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import download_assets
+from igibson.utils.utils import parse_config
download_assets()
@@ -21,8 +20,9 @@ def test_import_object():
obj = YCBObject("003_cracker_box")
s.import_object(obj)
- assert p.getNumBodies() == 5
+ objs = s.objects
s.disconnect()
+ assert objs == list(range(5))
def test_import_many_object():
@@ -34,12 +34,11 @@ def test_import_many_object():
obj = YCBObject("003_cracker_box")
s.import_object(obj)
- for j in range(100):
+ for j in range(1000):
s.step()
-
- assert p.getNumBodies() == 34
-
+ last_obj = s.objects[-1]
s.disconnect()
+ assert last_obj == 33
def test_import_rbo_object():
@@ -70,6 +69,7 @@ def test_import_box():
s = Simulator(mode="headless")
scene = StadiumScene()
s.import_scene(scene)
+ print(s.objects)
# wall = [pos, dim]
wall = [
[[0, 7, 1.01], [10, 0.2, 1]],
@@ -97,8 +97,9 @@ def test_import_box():
obj = Cube(curr[0], curr[1])
s.import_object(obj)
- turtlebot1 = Turtlebot()
- turtlebot2 = Turtlebot()
+ config = parse_config(os.path.join(igibson.root_path, "test", "test.yaml"))
+ turtlebot1 = Turtlebot(config)
+ turtlebot2 = Turtlebot(config)
s.import_robot(turtlebot1)
s.import_robot(turtlebot2)
turtlebot1.set_position([6.0, -6.0, 0.0])
diff --git a/tests/test_pbr.py b/igibson/test/test_pbr.py
similarity index 76%
rename from tests/test_pbr.py
rename to igibson/test/test_pbr.py
index 7f6c35036..1b1b058b9 100644
--- a/tests/test_pbr.py
+++ b/igibson/test/test_pbr.py
@@ -23,25 +23,22 @@ def test_render_pbr():
for fn in os.listdir(model_path):
if fn.endswith("obj"):
renderer.load_object(os.path.join(model_path, fn), scale=[1, 1, 1])
- renderer.add_instance_group([i])
+ renderer.add_instance(i)
i += 1
renderer.instances[-1].use_pbr = True
renderer.instances[-1].use_pbr_mapping = True
renderer.set_camera([1.5, 1.5, 1.5], [0, 0, 0], [0, 0, 1], cache=True)
frame = renderer.render(modes=("rgb", "normal"))
- Image.fromarray((255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8)).save(
- os.path.join(os.path.dirname(__file__), "test_render.png")
- )
+ Image.fromarray((255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8)).save("test_render.png")
renderer.set_camera([1.49, 1.49, 1.49], [0, 0.05, 0.05], [0, 0, 1], cache=True) # simulate camera movement
frame = renderer.render(modes=("optical_flow", "scene_flow"))
- plt.figure()
- ax1 = plt.subplot(1, 2, 1)
- ax1.imshow(np.abs(frame[0][:, :, :3]) / np.max(np.abs(frame[0][:, :, :3])))
- ax2 = plt.subplot(1, 2, 2)
- ax2.imshow(np.abs(frame[1][:, :, :3]) / np.max(np.abs(frame[1][:, :, :3])))
- plt.savefig(os.path.join(os.path.dirname(__file__), "test_render_flow.png"))
+ plt.subplot(1, 2, 1)
+ plt.imshow(np.abs(frame[0][:, :, :3]) / np.max(np.abs(frame[0][:, :, :3])))
+ plt.subplot(1, 2, 2)
+ plt.imshow(np.abs(frame[1][:, :, :3]) / np.max(np.abs(frame[1][:, :, :3])))
+ plt.savefig("test_render_flow.png")
renderer.release()
@@ -62,7 +59,7 @@ def test_render_pbr_optimized():
for fn in os.listdir(model_path):
if fn.endswith("obj"):
renderer.load_object(os.path.join(model_path, fn), scale=[1, 1, 1])
- renderer.add_instance_group([i])
+ renderer.add_instance(i)
i += 1
renderer.instances[-1].use_pbr = True
renderer.instances[-1].use_pbr_mapping = True
@@ -70,16 +67,13 @@ def test_render_pbr_optimized():
renderer.set_camera([1.5, 1.5, 1.5], [0, 0, 0], [0, 0, 1], cache=True)
frame = renderer.render(modes=("rgb", "normal"))
- Image.fromarray((255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8)).save(
- os.path.join(os.path.dirname(__file__), "test_render_optimized.png")
- )
+ Image.fromarray((255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8)).save("test_render_optimized.png")
renderer.set_camera([1.49, 1.49, 1.49], [0, 0.05, 0.05], [0, 0, 1], cache=True) # simulate camera movement
frame = renderer.render(modes=("optical_flow", "scene_flow"))
- plt.figure()
- ax1 = plt.subplot(1, 2, 1)
- ax1.imshow(np.abs(frame[0][:, :, :3]) / np.max(np.abs(frame[0][:, :, :3])))
- ax2 = plt.subplot(1, 2, 2)
- ax2.imshow(np.abs(frame[1][:, :, :3]) / np.max(np.abs(frame[1][:, :, :3])))
- plt.savefig(os.path.join(os.path.dirname(__file__), "test_render_optimized_flow.png"))
+ plt.subplot(1, 2, 1)
+ plt.imshow(np.abs(frame[0][:, :, :3]) / np.max(np.abs(frame[0][:, :, :3])))
+ plt.subplot(1, 2, 2)
+ plt.imshow(np.abs(frame[1][:, :, :3]) / np.max(np.abs(frame[1][:, :, :3])))
+ plt.savefig("test_render_optimized_flow.png")
renderer.release()
diff --git a/tests/test_render.py b/igibson/test/test_render.py
similarity index 77%
rename from tests/test_render.py
rename to igibson/test/test_render.py
index 3758beec4..3a0481733 100644
--- a/tests/test_render.py
+++ b/igibson/test/test_render.py
@@ -86,7 +86,7 @@ def test_render_rendering(record_property):
start = time.time()
renderer.load_object(os.path.join(test_dir, "mesh/bed1a77d92d64f5cbbaaae4feed64ec1_new.obj"))
elapsed = time.time() - start
- renderer.add_instance_group([0])
+ renderer.add_instance(0)
renderer.set_camera([0, 0, 1.2], [0, 1, 1.2], [0, 1, 0])
renderer.set_fov(90)
rgb = renderer.render(("rgb"))[0]
@@ -103,7 +103,7 @@ def test_render_rendering_cleaning():
for i in range(5):
renderer = MeshRenderer(width=800, height=600)
renderer.load_object(os.path.join(test_dir, "mesh/bed1a77d92d64f5cbbaaae4feed64ec1_new.obj"))
- renderer.add_instance_group([0])
+ renderer.add_instance(0)
renderer.set_camera([0, 0, 1.2], [0, 1, 1.2], [0, 1, 0])
renderer.set_fov(90)
rgb = renderer.render(("rgb"))[0]
@@ -112,3 +112,31 @@ def test_render_rendering_cleaning():
GPUtil.showUtilization()
renderer.release()
GPUtil.showUtilization()
+
+
+"""
+def test_tensor_render_rendering():
+ w = 800
+ h = 600
+ renderer = MeshTensorRenderer(w, h)
+ print('before load')
+ renderer.load_object(os.path.join(dir, 'mesh/bed1a77d92d64f5cbbaaae4feed64ec1_new.obj'))
+ print('after load')
+ renderer.set_camera([0, 0, 1.2], [0, 1, 1.2], [0, 1, 0])
+ renderer.set_fov(90)
+ tensor = torch.cuda.ByteTensor(h, w, 4)
+ tensor2 = torch.cuda.ByteTensor(h, w, 4)
+ renderer.render(tensor, tensor2)
+
+ img_np = tensor.flip(0).data.cpu().numpy().reshape(h, w, 4)
+ img_np2 = tensor2.flip(0).data.cpu().numpy().reshape(h, w, 4)
+ # plt.imshow(np.concatenate([img_np, img_np2], axis=1))
+ # plt.show()
+ assert (np.allclose(np.mean(img_np.astype(np.float32), axis=(0, 1)),
+ np.array([131.71548, 128.34981, 121.81708, 255.86292]), rtol=1e-3))
+ assert (np.allclose(np.mean(img_np2.astype(np.float32), axis=(0, 1)), np.array([154.2405, 0., 0., 255.86292]),
+ rtol=1e-3))
+ # print(np.mean(img_np.astype(np.float32), axis = (0,1)))
+ # print(np.mean(img_np2.astype(np.float32), axis = (0,1)))
+ renderer.release()
+"""
diff --git a/tests/test_render_tensor.py b/igibson/test/test_render_tensor.py
similarity index 70%
rename from tests/test_render_tensor.py
rename to igibson/test/test_render_tensor.py
index 9b83093e4..ff6f42291 100644
--- a/tests/test_render_tensor.py
+++ b/igibson/test/test_render_tensor.py
@@ -1,9 +1,6 @@
-import logging
import os
-import platform
import numpy as np
-import torch
import igibson
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
@@ -11,16 +8,13 @@
def test_tensor_render_rendering():
- if platform.system() != "Linux":
- logging.warning("Skip test_tensor_render_rendering on non-Linux platforms.")
- return
w = 800
h = 600
- setting = MeshRendererSettings(enable_pbr=False, msaa=True, enable_shadow=False)
+ setting = MeshRendererSettings(enable_pbr=False, msaa=True)
renderer = MeshRendererG2G(w, h, rendering_settings=setting)
test_dir = os.path.join(igibson.assets_path, "test")
renderer.load_object(os.path.join(test_dir, "mesh/bed1a77d92d64f5cbbaaae4feed64ec1_new.obj"))
- renderer.add_instance_group([0])
+ renderer.add_instance(0)
renderer.set_camera([0, 0, 1.2], [0, 1, 1.2], [0, 1, 0])
renderer.set_fov(90)
@@ -29,7 +23,6 @@ def test_tensor_render_rendering():
img_np = tensor.flip(0).data.cpu().numpy().reshape(h, w, 4)
img_np2 = tensor2.flip(0).data.cpu().numpy().reshape(h, w, 4)
- # print(np.mean(img_np.astype(np.float32), axis=(0, 1)))
# plt.subplot(1,2,1)
# plt.imshow(img_np)
# plt.subplot(1,2,2)
@@ -37,8 +30,10 @@ def test_tensor_render_rendering():
# plt.show()
assert np.allclose(
np.mean(img_np.astype(np.float32), axis=(0, 1)),
- np.array([131.72513, 128.30641, 121.820724, 255.56105]),
+ np.array([131.71548, 128.34981, 121.81708, 255.86292]),
rtol=1e-3,
)
+ # print(np.mean(img_np.astype(np.float32), axis = (0,1)))
+ # print(np.mean(img_np2.astype(np.float32), axis = (0,1)))
renderer.release()
diff --git a/tests/test_robot.py b/igibson/test/test_robot.py
similarity index 56%
rename from tests/test_robot.py
rename to igibson/test/test_robot.py
index 0be54cef6..480f482f4 100644
--- a/tests/test_robot.py
+++ b/igibson/test/test_robot.py
@@ -3,19 +3,29 @@
import numpy as np
import pybullet as p
-from igibson.robots import REGISTERED_ROBOTS
+import igibson
+from igibson.robots.ant_robot import Ant
+from igibson.robots.fetch_robot import Fetch
+from igibson.robots.humanoid_robot import Humanoid
+from igibson.robots.husky_robot import Husky
+from igibson.robots.jr2_kinova_robot import JR2_Kinova
+from igibson.robots.jr2_robot import JR2
+from igibson.robots.quadrotor_robot import Quadrotor
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.scenes.stadium_scene import StadiumScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import download_assets
+from igibson.utils.utils import parse_config
download_assets()
+config = parse_config(os.path.join(igibson.root_path, "test", "test.yaml"))
def test_fetch():
s = Simulator(mode="headless")
scene = StadiumScene()
s.import_scene(scene)
- fetch = REGISTERED_ROBOTS["Fetch"]()
+ fetch = Fetch(config)
s.import_robot(fetch)
for i in range(100):
fetch.calc_state()
@@ -27,7 +37,7 @@ def test_turtlebot():
s = Simulator(mode="headless")
scene = StadiumScene()
s.import_scene(scene)
- turtlebot = REGISTERED_ROBOTS["Turtlebot"]()
+ turtlebot = Turtlebot(config)
s.import_robot(turtlebot)
nbody = p.getNumBodies()
s.disconnect()
@@ -38,7 +48,7 @@ def test_jr2():
s = Simulator(mode="headless")
scene = StadiumScene()
s.import_scene(scene)
- jr2 = REGISTERED_ROBOTS["JR2"]()
+ jr2 = JR2(config)
s.import_robot(jr2)
nbody = p.getNumBodies()
s.disconnect()
@@ -49,9 +59,9 @@ def test_ant():
s = Simulator(mode="headless")
scene = StadiumScene()
s.import_scene(scene)
- ant = REGISTERED_ROBOTS["Ant"]()
+ ant = Ant(config)
s.import_robot(ant)
- ant2 = REGISTERED_ROBOTS["Ant"]()
+ ant2 = Ant(config)
s.import_robot(ant2)
ant2.set_position([0, 2, 2])
nbody = p.getNumBodies()
@@ -65,18 +75,40 @@ def test_husky():
s = Simulator(mode="headless")
scene = StadiumScene()
s.import_scene(scene)
- husky = REGISTERED_ROBOTS["Husky"]()
+ husky = Husky(config)
s.import_robot(husky)
nbody = p.getNumBodies()
s.disconnect()
assert nbody == 5
+def test_humanoid():
+ s = Simulator(mode="headless")
+ scene = StadiumScene()
+ s.import_scene(scene)
+ humanoid = Humanoid(config)
+ s.import_robot(humanoid)
+ nbody = p.getNumBodies()
+ s.disconnect()
+ assert nbody == 5
+
+
+def test_quadrotor():
+ s = Simulator(mode="headless")
+ scene = StadiumScene()
+ s.import_scene(scene)
+ quadrotor = Quadrotor(config)
+ s.import_robot(quadrotor)
+ nbody = p.getNumBodies()
+ s.disconnect()
+ assert nbody == 5
+
+
def test_turtlebot_position():
s = Simulator(mode="headless")
scene = StadiumScene()
s.import_scene(scene)
- turtlebot = REGISTERED_ROBOTS["Turtlebot"]()
+ turtlebot = Turtlebot(config)
s.import_robot(turtlebot)
turtlebot.set_position([0, 0, 5])
@@ -88,13 +120,28 @@ def test_turtlebot_position():
assert np.allclose(pos, np.array([0, 0, 5]))
+def test_humanoid_position():
+ s = Simulator(mode="headless")
+ scene = StadiumScene()
+ s.import_scene(scene)
+ humanoid = Humanoid(config)
+ s.import_robot(humanoid)
+ humanoid.set_position([0, 0, 5])
+ nbody = p.getNumBodies()
+ pos = humanoid.get_position()
+
+ s.disconnect()
+ assert nbody == 5
+ assert np.allclose(pos, np.array([0, 0, 5]))
+
+
def test_multiagent():
s = Simulator(mode="headless")
scene = StadiumScene()
s.import_scene(scene)
- turtlebot1 = REGISTERED_ROBOTS["Turtlebot"]()
- turtlebot2 = REGISTERED_ROBOTS["Turtlebot"]()
- turtlebot3 = REGISTERED_ROBOTS["Turtlebot"]()
+ turtlebot1 = Turtlebot(config)
+ turtlebot2 = Turtlebot(config)
+ turtlebot3 = Turtlebot(config)
s.import_robot(turtlebot1)
s.import_robot(turtlebot2)
@@ -117,28 +164,36 @@ def show_action_sensor_space():
scene = StadiumScene()
s.import_scene(scene)
- turtlebot = REGISTERED_ROBOTS["Turtlebot"]()
+ turtlebot = Turtlebot(config)
s.import_robot(turtlebot)
turtlebot.set_position([0, 1, 0.5])
- ant = REGISTERED_ROBOTS["Ant"]()
+ ant = Ant(config)
s.import_robot(ant)
ant.set_position([0, 2, 0.5])
- jr = REGISTERED_ROBOTS["JR2"]()
+ h = Humanoid(config)
+ s.import_robot(h)
+ h.set_position([0, 3, 2])
+
+ jr = JR2(config)
s.import_robot(jr)
jr.set_position([0, 4, 0.5])
- jr2 = REGISTERED_ROBOTS["JR2"]()
+ jr2 = JR2_Kinova(config)
s.import_robot(jr2)
jr2.set_position([0, 5, 0.5])
- husky = REGISTERED_ROBOTS["Husky"]()
+ husky = Husky(config)
s.import_robot(husky)
husky.set_position([0, 6, 0.5])
+ quad = Quadrotor(config)
+ s.import_robot(quad)
+ quad.set_position([0, 7, 0.5])
+
for robot in s.robots:
- print(type(robot), len(robot.joints), robot.calc_state().shape)
+ print(type(robot), len(robot.ordered_joints), robot.calc_state().shape)
for i in range(100):
s.step()
diff --git a/tests/test_scene_importing.py b/igibson/test/test_scene_importing.py
similarity index 70%
rename from tests/test_scene_importing.py
rename to igibson/test/test_scene_importing.py
index 9a46917ec..c5e39b190 100644
--- a/tests/test_scene_importing.py
+++ b/igibson/test/test_scene_importing.py
@@ -1,26 +1,24 @@
import os
-import pybullet as p
-
import igibson
-from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-from igibson.robots.turtlebot import Turtlebot
+from igibson.robots.turtlebot_robot import Turtlebot
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.scenes.stadium_scene import StadiumScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import download_assets, download_demo_data
+from igibson.utils.utils import parse_config
def test_import_building():
download_assets()
download_demo_data()
- s = Simulator(mode="headless", rendering_settings=MeshRendererSettings(texture_scale=0.4))
+ s = Simulator(mode="headless")
scene = StaticIndoorScene("Rs")
- s.import_scene(scene)
+ s.import_scene(scene, texture_scale=0.4)
for i in range(15):
s.step()
- assert p.getNumBodies() == 2
+ assert s.objects == list(range(2))
s.disconnect()
@@ -30,8 +28,8 @@ def test_import_building_big():
s = Simulator(mode="headless")
scene = StaticIndoorScene("Rs")
- s.import_scene(scene)
- assert p.getNumBodies() == 2
+ s.import_scene(scene, texture_scale=1)
+ assert s.objects == list(range(2))
s.disconnect()
@@ -42,22 +40,24 @@ def test_import_stadium():
s = Simulator(mode="headless")
scene = StadiumScene()
s.import_scene(scene)
- assert p.getNumBodies() == 4
+ print(s.objects)
+ assert s.objects == list(range(4))
s.disconnect()
def test_import_building_viewing():
download_assets()
download_demo_data()
+ config = parse_config(os.path.join(igibson.root_path, "test", "test.yaml"))
s = Simulator(mode="headless")
scene = StaticIndoorScene("Rs")
s.import_scene(scene)
- assert p.getNumBodies() == 2
+ assert s.objects == list(range(2))
- turtlebot1 = Turtlebot()
- turtlebot2 = Turtlebot()
- turtlebot3 = Turtlebot()
+ turtlebot1 = Turtlebot(config)
+ turtlebot2 = Turtlebot(config)
+ turtlebot3 = Turtlebot(config)
s.import_robot(turtlebot1)
s.import_robot(turtlebot2)
diff --git a/tests/test_sensors.py b/igibson/test/test_sensors.py
similarity index 91%
rename from tests/test_sensors.py
rename to igibson/test/test_sensors.py
index a1225c11f..3e7e212d2 100644
--- a/tests/test_sensors.py
+++ b/igibson/test/test_sensors.py
@@ -14,7 +14,7 @@
def test_vision_sensor():
download_assets()
download_demo_data()
- config_filename = os.path.join(os.path.dirname(__file__), "test_house.yaml")
+ config_filename = os.path.join(igibson.root_path, "test", "test_house.yaml")
env = iGibsonEnv(config_file=config_filename, mode="headless")
vision_modalities = ["rgb", "depth", "pc", "normal", "seg"]
vision_sensor = VisionSensor(env, vision_modalities)
@@ -39,7 +39,7 @@ def test_vision_sensor():
def test_scan_sensor():
download_assets()
download_demo_data()
- config_filename = os.path.join(os.path.dirname(__file__), "test_house.yaml")
+ config_filename = os.path.join(igibson.root_path, "test", "test_house.yaml")
env = iGibsonEnv(config_file=config_filename, mode="headless")
scan_sensor = ScanSensor(env, ["scan"])
scan_obs = scan_sensor.get_obs(env)["scan"]
@@ -51,7 +51,7 @@ def test_scan_sensor():
def test_velodyne():
download_assets()
download_demo_data()
- config_filename = os.path.join(os.path.dirname(__file__), "test_house.yaml")
+ config_filename = os.path.join(igibson.root_path, "test", "test_house.yaml")
env = iGibsonEnv(config_file=config_filename, mode="headless")
velodyne_sensor = VelodyneSensor(env)
velodyne_obs = velodyne_sensor.get_obs(env)
diff --git a/tests/test_simulator.py b/igibson/test/test_simulator.py
similarity index 100%
rename from tests/test_simulator.py
rename to igibson/test/test_simulator.py
diff --git a/tests/test_states.py b/igibson/test/test_states.py
similarity index 98%
rename from tests/test_states.py
rename to igibson/test/test_states.py
index 0d8105f39..40d34b177 100644
--- a/tests/test_states.py
+++ b/igibson/test/test_states.py
@@ -210,6 +210,7 @@ def test_toggle():
s.import_object(sink)
sink.set_position([1, 1, 0.8])
+ assert "toggleable" in sink.abilities
assert object_states.ToggledOn in sink.states
finally:
@@ -234,6 +235,7 @@ def test_dirty():
s.import_object(sink)
sink.set_position([1, 1, 0.8])
+ assert "dustyable" in sink.abilities
assert object_states.Dusty in sink.states
for i in range(10):
@@ -262,12 +264,13 @@ def test_water_source():
s.import_object(sink)
sink.states[object_states.ToggledOn].set_value(True)
sink.set_position([1, 1, 0.8])
+ assert "waterSource" in sink.abilities
assert object_states.WaterSource in sink.states
for i in range(2):
s.step()
# Check that we have some loaded particles here.
- assert sink.states[object_states.WaterSource].water_stream.get_active_particles()[0].get_body_id() is not None
+ assert sink.states[object_states.WaterSource].water_stream.get_active_particles()[0].body_id is not None
finally:
s.disconnect()
diff --git a/igibson/test/test_vr_renderer_ohoopee.py b/igibson/test/test_vr_renderer_ohoopee.py
new file mode 100644
index 000000000..a348615d1
--- /dev/null
+++ b/igibson/test/test_vr_renderer_ohoopee.py
@@ -0,0 +1,22 @@
+import time
+
+from igibson import dataset_path
+from igibson.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
+from igibson.core.render.mesh_renderer.mesh_renderer_vr import MeshRendererVR
+
+renderer = MeshRendererVR(MeshRenderer, msaa=True)
+# Note that it is necessary to load the full path of an object!
+renderer.load_object(dataset_path + "\\Ohoopee\\Ohoopee_mesh_texture.obj")
+renderer.add_instance(0)
+
+while True:
+ startFrame = time.time()
+ renderer.render()
+
+ endFrame = time.time()
+ deltaT = endFrame - startFrame
+ fps = 1 / float(deltaT)
+
+ print("Current fps: %f" % fps)
+
+renderer.release()
diff --git a/igibson/test/test_vr_system.py b/igibson/test/test_vr_system.py
new file mode 100644
index 000000000..e60dc9e1b
--- /dev/null
+++ b/igibson/test/test_vr_system.py
@@ -0,0 +1,8 @@
+from igibson.core.render.mesh_renderer.Release import MeshRendererContext
+
+vrsys = MeshRendererContext.VRSystem()
+recommendedWidth, recommendedHeight = vrsys.initVR()
+
+print("Initialized VR with width: %d and height: %d" % (recommendedWidth, recommendedHeight))
+
+vrsys.releaseVR()
diff --git a/igibson/utils/assets_utils.py b/igibson/utils/assets_utils.py
index c013275a7..5dc1b41ea 100644
--- a/igibson/utils/assets_utils.py
+++ b/igibson/utils/assets_utils.py
@@ -1,32 +1,13 @@
import argparse
import json
-import logging
import os
import subprocess
-import tempfile
from collections import defaultdict
import yaml
import igibson
-if os.name == "nt":
- import win32api
- import win32con
-
-
-def folder_is_hidden(p):
- """
- Removes hidden folders from a list. Works on Linux, Mac and Windows
-
- :return: true if a folder is hidden in the OS
- """
- if os.name == "nt":
- attribute = win32api.GetFileAttributes(p)
- return attribute & (win32con.FILE_ATTRIBUTE_HIDDEN | win32con.FILE_ATTRIBUTE_SYSTEM)
- else:
- return p.startswith(".") # linux-osx
-
def get_ig_avg_category_specs():
"""
@@ -55,20 +36,6 @@ def get_ig_category_ids():
return defaultdict(lambda: 255, name_to_id)
-def get_available_ig_scenes():
- """
- iGibson interactive scenes
-
- :return: list of available iGibson interactive scenes
- """
- ig_dataset_path = igibson.ig_dataset_path
- ig_scenes_path = os.path.join(ig_dataset_path, "scenes")
- available_ig_scenes = sorted(
- [f for f in os.listdir(ig_scenes_path) if (not folder_is_hidden(f) and f != "background")]
- )
- return available_ig_scenes
-
-
def get_ig_scene_path(scene_name):
"""
Get iGibson scene path
@@ -78,7 +45,7 @@ def get_ig_scene_path(scene_name):
"""
ig_dataset_path = igibson.ig_dataset_path
ig_scenes_path = os.path.join(ig_dataset_path, "scenes")
- logging.info("Scene name: {}".format(scene_name))
+ # print("SCENE NAME:", scene_name)
assert scene_name in os.listdir(ig_scenes_path), "Scene {} does not exist".format(scene_name)
return os.path.join(ig_scenes_path, scene_name)
@@ -137,33 +104,6 @@ def get_ig_model_path(category_name, model_name):
return os.path.join(ig_category_path, model_name)
-def get_all_object_categories():
- """
- Get iGibson all object categories
-
- :return: a list of all object categories
- """
- ig_dataset_path = igibson.ig_dataset_path
- ig_categories_path = os.path.join(ig_dataset_path, "objects")
-
- categories = sorted([f for f in os.listdir(ig_categories_path) if not folder_is_hidden(f)])
- return categories
-
-
-def get_object_models_of_category(category):
- """
- Get iGibson all object categories
-
- :return: a list of all object categories
- """
- ig_dataset_path = igibson.ig_dataset_path
- ig_categories_path = os.path.join(ig_dataset_path, "objects")
-
- category_models = os.listdir(os.path.join(ig_categories_path, category))
- models = [item for item in category_models if os.path.isdir(os.path.join(ig_categories_path, category, item))]
- return models
-
-
def get_all_object_models():
"""
Get iGibson all object models
@@ -198,17 +138,6 @@ def get_ig_assets_version():
return "{}".format(git_head_hash)
-def get_available_g_scenes():
- """
- Gibson scenes
-
- :return: list of available Gibson scenes
- """
- data_path = igibson.g_dataset_path
- available_g_scenes = sorted([f for f in os.listdir(data_path) if not folder_is_hidden(f)])
- return available_g_scenes
-
-
def get_scene_path(scene_id):
"""
Gibson scene path
@@ -250,84 +179,58 @@ def download_assets():
"""
Download iGibson assets
"""
-
- tmp_file = os.path.join(tempfile.gettempdir(), "assets_igibson.tar.gz")
-
- os.makedirs(os.path.dirname(igibson.assets_path), exist_ok=True)
+ if not os.path.exists(os.path.dirname(igibson.assets_path)):
+ os.makedirs(os.path.dirname(igibson.assets_path))
if not os.path.exists(igibson.assets_path):
- logging.info(
- "Downloading and decompressing assets from {}".format(
- "https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz"
- )
- )
os.system(
"wget -c --retry-connrefused --tries=5 --timeout=5 "
- "https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz -O {}".format(tmp_file)
+ "https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz -O /tmp/assets_igibson.tar.gz"
)
- os.system("tar -zxf {} --directory {}".format(tmp_file, os.path.dirname(igibson.assets_path)))
+ os.system("tar -zxf /tmp/assets_igibson.tar.gz --directory {}".format(os.path.dirname(igibson.assets_path)))
def download_demo_data():
"""
Download iGibson demo dataset
"""
-
- tmp_file = os.path.join(tempfile.gettempdir(), "Rs.tar.gz")
-
- os.makedirs(igibson.g_dataset_path, exist_ok=True)
+ if not os.path.exists(igibson.g_dataset_path):
+ os.makedirs(igibson.g_dataset_path)
if not os.path.exists(os.path.join(igibson.g_dataset_path, "Rs")):
- logging.info(
- "Downloading and decompressing Rs Gibson meshfile from {}".format(
- "https://storage.googleapis.com/gibson_scenes/Rs.tar.gz"
- )
- )
os.system(
"wget -c --retry-connrefused --tries=5 --timeout=5 "
- "https://storage.googleapis.com/gibson_scenes/Rs.tar.gz -O {}".format(tmp_file)
+ "https://storage.googleapis.com/gibson_scenes/Rs.tar.gz -O /tmp/Rs.tar.gz"
)
- os.system("tar -zxf {} --directory {}".format(tmp_file, igibson.g_dataset_path))
+ os.system("tar -zxf /tmp/Rs.tar.gz --directory {}".format(igibson.g_dataset_path))
def download_dataset(url):
"""
Download Gibson dataset
"""
-
if not os.path.exists(igibson.g_dataset_path):
- logging.info("Creating Gibson dataset folder at {}".format(igibson.g_dataset_path))
os.makedirs(igibson.g_dataset_path)
file_name = url.split("/")[-1]
-
- tmp_file = os.path.join(tempfile.gettempdir(), file_name)
-
- logging.info("Downloading and decompressing the full Gibson dataset from {}".format(url))
- os.system("wget -c --retry-connrefused --tries=5 --timeout=5 {} -O {}".format(url, tmp_file))
- os.system("tar -zxf {} --strip-components=1 --directory {}".format(tmp_file, igibson.g_dataset_path))
+ os.system("wget -c --retry-connrefused --tries=5 --timeout=5 {} -O /tmp/{}".format(url, file_name))
+ os.system("tar -zxf /tmp/{} --strip-components=1 --directory {}".format(file_name, igibson.g_dataset_path))
# These datasets come as folders; in these folders there are scenes, so --strip-components is needed.
def download_ext_scene_assets():
- logging.info("Downloading and decompressing 3DFront and Cubicasa")
os.makedirs(igibson.threedfront_dataset_path, exist_ok=True)
os.makedirs(igibson.cubicasa_dataset_path, exist_ok=True)
url = "https://storage.googleapis.com/gibson_scenes/default_materials.tar.gz"
-
file_name = url.split("/")[-1]
- tmp_file = os.path.join(tempfile.gettempdir(), file_name)
-
os.system("wget -c --retry-connrefused --tries=5 --timeout=5 {} -O /tmp/{}".format(url, file_name))
- os.system("tar -zxf {} --directory {}".format(tmp_file, igibson.cubicasa_dataset_path))
- os.system("tar -zxf {} --directory {}".format(tmp_file, igibson.threedfront_dataset_path))
+ os.system("tar -zxf /tmp/{} --directory {}".format(file_name, igibson.cubicasa_dataset_path))
+ os.system("tar -zxf /tmp/{} --directory {}".format(file_name, igibson.threedfront_dataset_path))
url = "https://storage.googleapis.com/gibson_scenes/threedfront_urdfs.tar.gz"
file_name = url.split("/")[-1]
- tmp_file = os.path.join(tempfile.gettempdir(), file_name)
-
os.system("wget -c --retry-connrefused --tries=5 --timeout=5 {} -O /tmp/{}".format(url, file_name))
- os.system("tar -zxf {} --directory {}".format(tmp_file, igibson.threedfront_dataset_path))
+ os.system("tar -zxf /tmp/{} --directory {}".format(file_name, igibson.threedfront_dataset_path))
def download_ig_dataset():
@@ -343,16 +246,11 @@ def download_ig_dataset():
print("You need to agree to the terms for using iGibson dataset.")
if not os.path.exists(igibson.ig_dataset_path):
- logging.info("Creating iGibson dataset folder at {}".format(igibson.g_dataset_path))
os.makedirs(igibson.ig_dataset_path)
-
url = "https://storage.googleapis.com/gibson_scenes/ig_dataset.tar.gz"
file_name = url.split("/")[-1]
- tmp_file = os.path.join(tempfile.gettempdir(), file_name)
-
- logging.info("Downloading and decompressing the full iGibson dataset of scenes from {}".format(url))
- os.system("wget -c --retry-connrefused --tries=5 --timeout=5 {} -O {}".format(url, tmp_file))
- os.system("tar -zxf {} --strip-components=1 --directory {}".format(tmp_file, igibson.ig_dataset_path))
+ os.system("wget -c --retry-connrefused --tries=5 --timeout=5 {} -O /tmp/{}".format(url, file_name))
+ os.system("tar -zxf /tmp/{} --strip-components=1 --directory {}".format(file_name, igibson.ig_dataset_path))
# These datasets come as folders; in these folders there are scenes, so --strip-components is needed.
@@ -384,7 +282,7 @@ def change_data_path():
parser.add_argument(
"--download_ext_scene_assets", action="store_true", help="download external scene dataset assets"
)
- parser.add_argument("--change_data_path", action="store_true", help="change the path to store assets and datasets")
+ parser.add_argument("--change_data_path", action="store_true", help="change the path to store assets and datasert")
args = parser.parse_args()
diff --git a/igibson/utils/behavior_robot_planning_utils.py b/igibson/utils/behavior_robot_planning_utils.py
index 43cb94d03..9b532523c 100644
--- a/igibson/utils/behavior_robot_planning_utils.py
+++ b/igibson/utils/behavior_robot_planning_utils.py
@@ -8,6 +8,7 @@
from igibson.external.pybullet_tools.utils import (
CIRCULAR_LIMITS,
MAX_DISTANCE,
+ MAX_DISTANCE_GRASP,
PI,
birrt,
circular_difference,
@@ -33,11 +34,12 @@ def plan_base_motion_br(
resolutions=0.05 * np.ones(3),
max_distance=MAX_DISTANCE,
override_sample_fn=None,
+ rng=np.random.default_rng(23),
**kwargs
):
def sample_fn():
- x, y = np.random.uniform(*base_limits)
- theta = np.random.uniform(*CIRCULAR_LIMITS)
+ x, y = (rng.random() * (base_limits[1] - base_limits[0])) + base_limits[0]
+ theta = (rng.random() * (CIRCULAR_LIMITS[1] - CIRCULAR_LIMITS[0])) + CIRCULAR_LIMITS[0]
return (x, y, theta)
if override_sample_fn is not None:
@@ -47,8 +49,11 @@ def sample_fn():
distance_fn = get_base_distance_fn(weights=weights)
body_ids = []
- for part in ["body", "left_hand", "right_hand"]:
- body_ids.append(robot.links[part].get_body_id())
+ for part in ["body", "left_hand", "right_hand", "eye"]:
+ body_ids.append(robot.parts[part].body_id)
+
+ if robot.parts["right_hand"].object_in_hand is not None:
+ body_ids.append(robot.parts["right_hand"].object_in_hand)
def extend_fn(q1, q2):
target_theta = np.arctan2(q2[1] - q1[1], q2[0] - q1[0])
@@ -89,10 +94,10 @@ def collision_fn(q):
yaw = p.getEulerFromQuaternion(robot.get_orientation())[2]
start_conf = [pos[0], pos[1], yaw]
if collision_fn(start_conf):
- # print("Warning: initial configuration is in collision")
+ print("Warning: initial configuration is in collision")
return None
if collision_fn(end_conf):
- # print("Warning: end configuration is in collision")
+ print("Warning: end configuration is in collision")
return None
if direct:
return direct_path(start_conf, end_conf, extend_fn, collision_fn)
@@ -102,12 +107,12 @@ def collision_fn(q):
def dry_run_base_plan(robot: BehaviorRobot, plan):
for (x, y, yaw) in plan:
robot.set_position_orientation([x, y, robot.initial_z_offset], p.getQuaternionFromEuler([0, 0, yaw]))
- time.sleep(0.01)
+ time.sleep(0.05)
def dry_run_arm_plan(robot: BehaviorRobot, plan):
for (x, y, z, roll, pitch, yaw) in plan:
- robot.links["right_hand"].set_position_orientation([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
+ robot.parts["right_hand"].set_position_orientation([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw]))
time.sleep(0.01)
@@ -141,19 +146,25 @@ def plan_hand_motion_br(
direct=False,
weights=(1, 1, 1, 5, 5, 5),
resolutions=0.02 * np.ones(6),
- max_distance=MAX_DISTANCE,
+ max_distance=MAX_DISTANCE_GRASP,
+ rng=np.random.default_rng(23),
**kwargs
):
def sample_fn():
- x, y, z = np.random.uniform(*hand_limits)
- r, p, yaw = np.random.uniform((-PI, -PI, -PI), (PI, PI, PI))
+ # x, y, z = (rng.random() * (hand_limits[1] - hand_limits[0])) + hand_limits[0] #np.random.uniform(*hand_limits)
+ x = rng.random() * (hand_limits[1][0] - hand_limits[0][0]) + hand_limits[0][0]
+ y = rng.random() * (hand_limits[1][1] - hand_limits[0][1]) + hand_limits[0][1]
+ z = rng.random() * (hand_limits[1][2] - hand_limits[0][2]) + hand_limits[0][2]
+ r = (rng.random() * (2 * PI)) - PI
+ p = (rng.random() * (2 * PI)) - PI
+ yaw = (rng.random() * (2 * PI)) - PI
return (x, y, z, r, p, yaw)
difference_fn = get_hand_difference_fn()
distance_fn = get_hand_distance_fn(weights=weights)
- pos = robot.links["right_hand"].get_position()
- orn = robot.links["right_hand"].get_orientation()
+ pos = robot.parts["right_hand"].get_position()
+ orn = robot.parts["right_hand"].get_orientation()
rpy = p.getEulerFromQuaternion(orn)
start_conf = [pos[0], pos[1], pos[2], rpy[0], rpy[1], rpy[2]]
@@ -174,7 +185,7 @@ def extend_fn(q1, q2):
def collision_fn(q):
# TODO: update this function
# set_base_values(body, q)
- robot.links["right_hand"].set_position_orientation(
+ robot.parts["right_hand"].set_position_orientation(
[q[0], q[1], q[2]], p.getQuaternionFromEuler([q[3], q[4], q[5]])
)
if obj_in_hand is not None:
@@ -185,22 +196,25 @@ def collision_fn(q):
)
collision = any(
- pairwise_collision(robot.links["right_hand"].get_body_id(), obs, max_distance=max_distance)
- for obs in obstacles
+ pairwise_collision(robot.parts["right_hand"].body_id, obs, max_distance=max_distance) for obs in obstacles
)
if obj_in_hand is not None:
+ if type(obj_in_hand.body_id) == list:
+ obj_in_hand_body_id = obj_in_hand.body_id[0]
+ else:
+ obj_in_hand_body_id = obj_in_hand.body_id
collision = collision or any(
- pairwise_collision(obj_in_hand.get_body_id(), obs, max_distance=max_distance) for obs in obstacles
+ pairwise_collision(obj_in_hand_body_id, obs, max_distance=max_distance) for obs in obstacles if obs != 0
)
return collision
if collision_fn(start_conf):
- # print("Warning: initial configuration is in collision")
+ print("Warning: initial configuration is in collision")
return None
if collision_fn(end_conf):
- # print("Warning: end configuration is in collision")
+ print("Warning: end configuration is in collision")
return None
if direct:
return direct_path(start_conf, end_conf, extend_fn, collision_fn)
@@ -210,16 +224,14 @@ def collision_fn(q):
if __name__ == "__main__":
config = parse_config(os.path.join(igibson.example_config_path, "behavior.yaml"))
settings = MeshRendererSettings(enable_shadow=False, msaa=False)
- s = Simulator(
- mode="gui_interactive", use_pb_gui=True, image_width=256, image_height=256, rendering_settings=settings
- )
+ s = Simulator(mode="gui", image_width=256, image_height=256, rendering_settings=settings)
scene = EmptyScene()
scene.objects_by_id = {}
- s.import_scene(scene)
+ s.import_scene(scene, render_floor_plane=True)
- agent = BehaviorRobot(s, show_visual_head=True, use_ghost_hands=False)
- s.import_robot(agent)
+ agent = BehaviorRobot(s, use_tracked_body_override=True, show_visual_head=True, use_ghost_hands=False)
+ s.import_behavior_robot(agent)
s.register_main_vr_robot(agent)
initial_pos_z_offset = 0.7
@@ -230,7 +242,7 @@ def collision_fn(q):
plan = plan_hand_motion_br(agent, [3, 3, 3, 0, 0, 0], ((-5, -5, -5), (5, 5, 5)))
print(plan)
for q in plan:
- agent.links["right_hand"].set_position_orientation(
+ agent.parts["right_hand"].set_position_orientation(
[q[0], q[1], q[2]], p.getQuaternionFromEuler([q[3], q[4], q[5]])
)
time.sleep(0.05)
diff --git a/igibson/utils/checkpoint_utils.py b/igibson/utils/checkpoint_utils.py
index 71a3750da..7916827da 100644
--- a/igibson/utils/checkpoint_utils.py
+++ b/igibson/utils/checkpoint_utils.py
@@ -1,35 +1,54 @@
"""This file contains utils for BEHAVIOR demo replay checkpoints."""
import json
import os
+from igibson.objects.articulated_object import URDFObject
import pybullet as p
from igibson.utils.utils import restoreState
+def save_task_relevant_state(env, root_directory, filename="behavior_dump"):
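+ """Dump the task-relevant object and robot states of env to <root_directory>/<filename>.json."""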
+ json_path = os.path.join(root_directory, f"{filename}.json")
+
+ # Save the dump in a file.
+ with open(json_path, "w") as f:
+ json.dump(save_task_relevant_object_and_robot_states(env), f)
+
+
+def save_sim_urdf_object_state(sim, root_directory, filename="behavior_dump"):
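+ """Dump the states of all URDF objects and robots in the simulator to <root_directory>/<filename>.json."""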
+ json_path = os.path.join(root_directory, f"{filename}.json")
+
+ # Save the dump in a file.
+ with open(json_path, "w") as f:
+ json.dump(save_all_scene_object_and_robot_states(sim), f)
+
+
def save_checkpoint(simulator, root_directory):
bullet_path = os.path.join(root_directory, "%d.bullet" % simulator.frame_count)
json_path = os.path.join(root_directory, "%d.json" % simulator.frame_count)
-
# Save the simulation state.
p.saveBullet(bullet_path)
-
# Save the dump in a file.
with open(json_path, "w") as f:
json.dump(save_internal_states(simulator), f)
+ return simulator.frame_count
def load_checkpoint(simulator, root_directory, frame):
bullet_path = os.path.join(root_directory, "%d.bullet" % frame)
json_path = os.path.join(root_directory, "%d.json" % frame)
-
# Restore the simulation state.
+ # p.restoreState(fileName=bullet_path)
restoreState(fileName=bullet_path)
-
with open(json_path, "r") as f:
dump = json.load(f)
-
load_internal_states(simulator, dump)
+ # NOTE: For all articulated objects, we need to force_wakeup
+ # for the visuals in the simulator to update
+ for obj in simulator.scene.get_objects():
+ if isinstance(obj, URDFObject):
+ obj.force_wakeup()
def save_internal_states(simulator):
@@ -47,6 +66,14 @@ def save_internal_states(simulator):
def load_internal_states(simulator, dump):
+ # NOTE: sometimes notebooks turn into hardbacks here.
+ # I.e. if you (1) create an iGibson BehaviorEnv, (2) save it,
+ # (3) create a new iGibson BehaviorEnv with the same random seed
+ # and other parameters, and (4) try to load the saved values from (2),
+ # you might see a KeyError for a notebook or hardback. This is
+ # simply because creating the new environment in (3) may cause
+ # some notebooks to be renamed as hardbacks.
+
# Restore the object state.
object_dump = dump["objects"]
for name, obj in simulator.scene.objects_by_name.items():
@@ -56,3 +83,32 @@ def load_internal_states(simulator, dump):
robot_dumps = dump["robots"]
for robot, robot_dump in zip(simulator.robots, robot_dumps):
robot.load_state(robot_dump)
+
+
+def save_task_relevant_object_and_robot_states(env):
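+ """Collect metadata, asset path, pose, and scale for each task-relevant object, plus dumped robot states."""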
+ # Dump the object state.
+ object_dump = {}
+ for obj in env.task_relevant_objects:
+ object_dump[obj.bddl_object_scope] = {'metadata': obj.metadata, 'asset_path': obj.model_path, 'pose': tuple(obj.get_position_orientation()), 'scale': tuple(obj.scale)}
+
+ # Dump the robot state.
+ robot_dump = []
+ for robot in env.simulator.robots:
+ robot_dump.append(robot.dump_state())
+
+ return {"objects": object_dump, "robots": robot_dump}
+
+def save_all_scene_object_and_robot_states(sim):
+ # Dump the object state, but only for objects of type URDFObject
+ # that are in the sim.
+ object_dump = {}
+ for obj in sim.scene.get_objects():
+ if 'URDFObject' in str(type(obj)):
+ object_dump[obj.bddl_object_scope] = {'metadata': obj.metadata, 'asset_path': obj.model_path, 'pose': tuple(obj.get_position_orientation()), 'scale': tuple(obj.scale)}
+
+ # Dump the robot state.
+ robot_dump = []
+ for robot in sim.robots:
+ robot_dump.append(robot.dump_state())
+
+ return {"objects": object_dump, "robots": robot_dump}
\ No newline at end of file
diff --git a/igibson/utils/constants.py b/igibson/utils/constants.py
index 7dfd6ca68..8370fc028 100644
--- a/igibson/utils/constants.py
+++ b/igibson/utils/constants.py
@@ -9,20 +9,6 @@
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
-class ViewerMode(IntEnum):
- NAVIGATION = 0
- MANIPULATION = 1
- PLANNING = 2
-
-
-class SimulatorMode(IntEnum):
- GUI_INTERACTIVE = 1
- GUI_NON_INTERACTIVE = 2
- HEADLESS = 3
- HEADLESS_TENSOR = 4
- VR = 5
-
-
class SemanticClass(IntEnum):
BACKGROUND = 0
ROBOTS = 1
@@ -42,14 +28,10 @@ class OccupancyGridState(object):
FREESPACE = 1.0
-# PyBullet-related
class PyBulletSleepState(IntEnum):
AWAKE = 1
-PYBULLET_BASE_LINK_INDEX = -1
-
-
# BEHAVIOR-related
FLOOR_SYNSET = "floor.n.01"
NON_SAMPLEABLE_OBJECTS = []
diff --git a/igibson/utils/data_utils/ext_object/scripts/step_5_visualizations.py b/igibson/utils/data_utils/ext_object/scripts/step_5_visualizations.py
index d9370d997..c72acd7d4 100644
--- a/igibson/utils/data_utils/ext_object/scripts/step_5_visualizations.py
+++ b/igibson/utils/data_utils/ext_object/scripts/step_5_visualizations.py
@@ -34,13 +34,13 @@ def main():
settings = MeshRendererSettings(env_texture_filename=hdr_texture, enable_shadow=True, msaa=True)
s = Simulator(mode="headless", image_width=1800, image_height=1200, vertical_fov=70, rendering_settings=settings)
- scene = EmptyScene(render_floor_plane=False)
- s.import_scene(scene)
+ scene = EmptyScene()
+ s.import_scene(scene, render_floor_plane=False)
s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])
s.renderer.load_object("plane/plane_z_up_0.obj", scale=[3, 3, 3])
- s.renderer.add_instance_group([0])
+ s.renderer.add_instance(0)
s.renderer.set_pose([0, 0, -1.5, 1, 0, 0.0, 0.0], -1)
###########################
diff --git a/igibson/utils/data_utils/ext_object/scripts_wip/category_viewer.py b/igibson/utils/data_utils/ext_object/scripts_wip/category_viewer.py
index 87426557e..a65b445b9 100644
--- a/igibson/utils/data_utils/ext_object/scripts_wip/category_viewer.py
+++ b/igibson/utils/data_utils/ext_object/scripts_wip/category_viewer.py
@@ -77,7 +77,7 @@ def main():
batch_size = 100
max_attempts = 100
- s = Simulator(mode="gui_interactive", use_pb_gui=True)
+ s = Simulator(mode="gui")
scene = EmptyScene()
s.import_scene(scene)
acs = get_ig_avg_category_specs()
diff --git a/igibson/utils/data_utils/ext_object/scripts_wip/get_obj_stable_rotations.py b/igibson/utils/data_utils/ext_object/scripts_wip/get_obj_stable_rotations.py
index 03585ec37..d70c67eeb 100644
--- a/igibson/utils/data_utils/ext_object/scripts_wip/get_obj_stable_rotations.py
+++ b/igibson/utils/data_utils/ext_object/scripts_wip/get_obj_stable_rotations.py
@@ -62,7 +62,7 @@ def main(args):
for line in f:
cat_ids.append(line.strip().split("/"))
for object_cat, object_id in cat_ids:
- print("Processing %s %s" % (object_cat, object_id))
+ # print("Processing %s %s" % (object_cat, object_id))
metadata_file = "data/ig_dataset/objects/%s/%s/misc/metadata.json" % (object_cat, object_id)
with open(metadata_file, "r") as f:
metadata = json.load(f)
diff --git a/igibson/utils/data_utils/ext_object/scripts_wip/joint_annotation_tool.py b/igibson/utils/data_utils/ext_object/scripts_wip/joint_annotation_tool.py
index 6e2d5621b..f22922048 100644
--- a/igibson/utils/data_utils/ext_object/scripts_wip/joint_annotation_tool.py
+++ b/igibson/utils/data_utils/ext_object/scripts_wip/joint_annotation_tool.py
@@ -22,7 +22,6 @@
ABILITY_NAME = "openable"
META_FIELD = "openable_joint_ids"
-BOTH_SIDES_META_FIELD = "openable_both_sides"
SKIP_EXISTING = True
ALLOWED_JOINT_TYPES = ["revolute", "prismatic", "continuous"]
@@ -68,13 +67,11 @@ def get_joint_selection(s, objdirfull, offline_joints):
)
accepted_joint_infos = []
- both_sides = False
for joint in relevant_joints:
if joint.jointUpperLimit <= joint.jointLowerLimit:
print("Bounds seem funky.")
toggle_time = time.time()
- reverse = False
percentages = [0, 0.25, 0.5, 0.75, 1.0]
i = 0
while True:
@@ -82,10 +79,7 @@ def get_joint_selection(s, objdirfull, offline_joints):
if time.time() - toggle_time > 0.5:
toggle_time = time.time()
i += 1
- idx = i % len(percentages)
- if reverse:
- idx = len(percentages) - idx - 1
- percentage = percentages[idx]
+ percentage = percentages[i % len(percentages)]
pos = (1 - percentage) * joint.jointLowerLimit + percentage * joint.jointUpperLimit
p.resetJointState(bid, joint.jointIndex, pos)
@@ -95,22 +89,14 @@ def get_joint_selection(s, objdirfull, offline_joints):
continue
elif event.key.char == "y":
- accepted_joint_infos.append((joint, -1 if reverse else 1))
+ accepted_joint_infos.append(joint)
break
elif event.key.char == "n":
break
- elif event.key.char == "r":
- reverse = not reverse
- print("Reverse:", reverse)
-
- elif event.key.char == "b":
- both_sides = not both_sides
- print("Both ways:", both_sides)
-
obj.set_position([300, 300, 5])
- return accepted_joint_infos, both_sides
+ return accepted_joint_infos
def main():
@@ -206,7 +192,7 @@ def main():
sorted_joint_counts = sorted(relevant_joint_counts.items(), key=lambda x: x[0])
print("Objects per joint count:\n%s" % "\n".join(["%d joints: %d" % pair for pair in sorted_joint_counts]))
- s = Simulator(mode="gui_interactive", use_pb_gui=True)
+ s = Simulator(mode="gui")
scene = EmptyScene()
s.import_scene(scene)
i = 0
@@ -223,13 +209,12 @@ def main():
print("%s already has the requested link." % objdirfull)
existing = True
processed_allowed_joints = meta[META_FIELD]
- both_sides = meta[BOTH_SIDES_META_FIELD] if BOTH_SIDES_META_FIELD in meta else False
if not existing or not SKIP_EXISTING:
joint_names_matching = [bytes("obj_" + j, encoding="utf-8") for j in joints]
- allowed_joints, both_sides = get_joint_selection(s, objdirfull, joint_names_matching)
+ allowed_joints = get_joint_selection(s, objdirfull, joint_names_matching)
processed_allowed_joints = [
- (ji.jointIndex, str(ji.jointName[4:], encoding="utf-8"), reverse) for ji, reverse in allowed_joints
+ (ji.jointIndex, str(ji.jointName[4:], encoding="utf-8")) for ji in allowed_joints
]
# Do some final validation
@@ -237,7 +222,6 @@ def main():
print("Object %s has joints but none were useful for openable." % objdirfull)
meta[META_FIELD] = processed_allowed_joints
- meta[BOTH_SIDES_META_FIELD] = both_sides
with open(mfn, "w") as mf:
json.dump(meta, mf)
diff --git a/igibson/utils/data_utils/ext_object/scripts_wip/link_annotation_tool.py b/igibson/utils/data_utils/ext_object/scripts_wip/link_annotation_tool.py
index f2af0f6f9..425e0feb8 100644
--- a/igibson/utils/data_utils/ext_object/scripts_wip/link_annotation_tool.py
+++ b/igibson/utils/data_utils/ext_object/scripts_wip/link_annotation_tool.py
@@ -130,7 +130,7 @@ def main():
size = np.array(meta["links"][LINK_NAME]["size"])
rotation = np.array(meta["links"][LINK_NAME]["rpy"])
- s = Simulator(mode="gui_interactive", use_pb_gui=True)
+ s = Simulator(mode="gui")
scene = EmptyScene()
s.import_scene(scene)
obj = get_obj(objdirfull)
diff --git a/igibson/utils/data_utils/ext_object/scripts_wip/link_bounding_box_tool.py b/igibson/utils/data_utils/ext_object/scripts_wip/link_bounding_box_tool.py
deleted file mode 100644
index 7beaf69ba..000000000
--- a/igibson/utils/data_utils/ext_object/scripts_wip/link_bounding_box_tool.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import json
-import os
-import xml.etree.ElementTree as ET
-
-import numpy as np
-import tqdm
-import trimesh
-
-from igibson.utils.assets_utils import get_all_object_models
-
-SKIP_EXISTING = True
-IGNORE_ERRORS = False
-
-
-def get_urdf_filename(folder):
- return os.path.join(folder, os.path.basename(folder) + ".urdf")
-
-
-def get_metadata_filename(objdir):
- return os.path.join(objdir, "misc", "metadata.json")
-
-
-def validate_meta(meta):
- if "link_bounding_boxes" not in meta:
- return "No bounding box found"
-
- lbbs = meta["link_bounding_boxes"]
- for link, lbb in lbbs.items():
- for mesh_type in ["visual", "collision"]:
- if mesh_type not in lbb:
- return "No " + mesh_type + " bounding box found for link " + link
-
- lbb_thistype = lbb[mesh_type]
- if lbb_thistype is None:
- continue
-
- for bb_type in ["axis_aligned", "oriented"]:
- if bb_type not in lbb_thistype:
- return "No " + bb_type + "bounding box found for link " + link + ", type " + mesh_type
-
- return None
-
-
-def main():
- # Now collect the actual objects.
- objects = get_all_object_models()
-
- objects.sort()
- print("%d objects.\n" % len(objects))
-
- # Filter out already-processed objects.
- objects_to_process = []
- print("Scanning all objects.")
- for objdirfull in tqdm.tqdm(objects):
- mfn = get_metadata_filename(objdirfull)
- with open(mfn, "r") as mf:
- meta = json.load(mf)
-
- if SKIP_EXISTING:
- validation_error = validate_meta(meta)
- if validation_error is None:
- continue
- else:
- print("%s: %s" % (objdirfull, validation_error))
-
- objects_to_process.append(objdirfull)
-
- # Now process the remaining.
- print("%d objects will be processed. Starting processing.\n" % len(objects_to_process))
- for objdirfull in tqdm.tqdm(objects_to_process):
- mfn = get_metadata_filename(objdirfull)
- with open(mfn, "r") as mf:
- meta = json.load(mf)
-
- try:
- urdf_file = get_urdf_filename(objdirfull)
- tree = ET.parse(urdf_file)
- links = tree.findall("link")
-
- link_bounding_boxes = {}
- for link in links:
- this_link_bounding_boxes = {}
-
- for mesh_type in ["collision", "visual"]:
- try:
- # For each type of mesh, we first combine all of the different mesh files into a single mesh.
- mesh_type_nodes = link.findall(mesh_type)
- combined_mesh = trimesh.Trimesh()
-
- if not mesh_type_nodes:
- # None here means that the object does not have this type of mesh.
- this_link_bounding_boxes[mesh_type] = None
- continue
-
- for mesh_type_node in mesh_type_nodes:
- origin = mesh_type_node.find("origin")
- rotation = origin.attrib["rpy"] if "rpy" in origin.attrib else "0 0 0"
- rotation = np.array([float(x) for x in rotation.split(" ")])
- assert rotation.shape == (3,)
- translation = origin.attrib["xyz"] if "xyz" in origin.attrib else "0 0 0"
- translation = np.array([float(x) for x in translation.split(" ")])
- assert translation.shape == (3,)
- mesh_nodes = mesh_type_node.findall("geometry/mesh")
-
- for mesh_node in mesh_nodes:
- object_file = os.path.join(objdirfull, mesh_node.attrib["filename"])
- mesh = trimesh.load(object_file, force="mesh", skip_materials=True)
-
- scale = mesh_node.attrib["scale"] if "scale" in mesh_node.attrib else "1 1 1"
- scale = np.array([float(x) for x in scale.split(" ")])
-
- # Apply the translation & scaling and add the mesh onto the combined mesh.
- combined_mesh += mesh.apply_transform(
- trimesh.transformations.compose_matrix(
- scale=scale, angles=rotation, translate=translation
- )
- )
-
- # Now that we have the combined mesh, let's simply compute the bounding box.
- axis_aligned_bbox = combined_mesh.bounding_box
- axis_aligned_bbox_dict = {
- "extent": np.array(axis_aligned_bbox.primitive.extents).tolist(),
- "transform": np.array(axis_aligned_bbox.primitive.transform).tolist(),
- }
-
- oriented_bbox = combined_mesh.bounding_box_oriented
- oriented_bbox_dict = {
- "extent": np.array(oriented_bbox.primitive.extents).tolist(),
- "transform": np.array(oriented_bbox.primitive.transform).tolist(),
- }
-
- this_link_bounding_boxes[mesh_type] = {
- "axis_aligned": axis_aligned_bbox_dict,
- "oriented": oriented_bbox_dict,
- }
- except Exception as e:
- print(
- "Problem with %s mesh in link %s in obj %s: %s"
- % (mesh_type, link.attrib["name"], objdirfull, e)
- )
-
- # We are quite sensitive against missing collision meshes!
- if mesh_type == "collision":
- raise
-
- if len(this_link_bounding_boxes) > 0:
- link_bounding_boxes[link.attrib["name"]] = this_link_bounding_boxes
-
- if len(link_bounding_boxes) == 0:
- raise ValueError("No links have a mesh for object %s" % objdirfull)
-
- meta["link_bounding_boxes"] = link_bounding_boxes
-
- with open(mfn, "w") as mf:
- json.dump(meta, mf)
- except:
- combined_mesh.show()
-
- if IGNORE_ERRORS:
- print("Something went wrong with ", objdirfull)
- else:
- raise
-
-
-if __name__ == "__main__":
- main()
diff --git a/igibson/utils/data_utils/ext_object/scripts_wip/query_object_placement_probs.py b/igibson/utils/data_utils/ext_object/scripts_wip/query_object_placement_probs.py
index c8f82cc14..15983cde9 100644
--- a/igibson/utils/data_utils/ext_object/scripts_wip/query_object_placement_probs.py
+++ b/igibson/utils/data_utils/ext_object/scripts_wip/query_object_placement_probs.py
@@ -92,7 +92,7 @@ def main(args):
s = Simulator(mode="headless", image_width=800, image_height=800, rendering_settings=settings)
simulator = s
scene = InteractiveIndoorScene(scene_name, texture_randomization=False, object_randomization=False)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
for obj_name in scene.objects_by_name:
obj = scene.objects_by_name[obj_name]
diff --git a/igibson/utils/data_utils/ext_object/scripts_wip/sample_clutter.py b/igibson/utils/data_utils/ext_object/scripts_wip/sample_clutter.py
index c19452b97..de82170e9 100644
--- a/igibson/utils/data_utils/ext_object/scripts_wip/sample_clutter.py
+++ b/igibson/utils/data_utils/ext_object/scripts_wip/sample_clutter.py
@@ -87,7 +87,7 @@ def main(args):
object_randomization=False,
load_object_categories=support_categories,
)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
renderer = s.renderer
category_supporting_objects = {}
diff --git a/igibson/utils/data_utils/ext_scene/scripts/step_5_generate_trav_map.py b/igibson/utils/data_utils/ext_scene/scripts/step_5_generate_trav_map.py
index 42431c5a1..ad427da8b 100644
--- a/igibson/utils/data_utils/ext_scene/scripts/step_5_generate_trav_map.py
+++ b/igibson/utils/data_utils/ext_scene/scripts/step_5_generate_trav_map.py
@@ -34,7 +34,7 @@ def generate_trav_map(scene_name, scene_source, load_full_scene=True):
if not load_full_scene:
scene._set_first_n_objects(3)
s = Simulator(mode="headless", image_width=512, image_height=512, device_idx=0)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
if load_full_scene:
scene.open_all_doors()
diff --git a/igibson/utils/data_utils/ext_scene/scripts/utils/print_room_furniture_support_type.py b/igibson/utils/data_utils/ext_scene/scripts/utils/print_room_furniture_support_type.py
index ad58afdf1..91f17a60a 100644
--- a/igibson/utils/data_utils/ext_scene/scripts/utils/print_room_furniture_support_type.py
+++ b/igibson/utils/data_utils/ext_scene/scripts/utils/print_room_furniture_support_type.py
@@ -17,7 +17,7 @@ def get_scene_support_objs(scene_name):
s = Simulator(mode="headless", image_width=800, image_height=800, rendering_settings=settings)
simulator = s
scene = InteractiveIndoorScene(scene_name, texture_randomization=False, object_randomization=False)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
for obj_name in scene.objects_by_name:
obj = scene.objects_by_name[obj_name]
diff --git a/igibson/utils/data_utils/sampling_task/sampling_loader.py b/igibson/utils/data_utils/sampling_task/sampling_loader.py
deleted file mode 100644
index 881a5f4c6..000000000
--- a/igibson/utils/data_utils/sampling_task/sampling_loader.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import os
-
-from IPython import embed
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.simulator import Simulator
-from igibson.utils.utils import parse_config
-
-task_choices = [
- "packing_lunches_filtered",
- "assembling_gift_baskets_filtered",
- "organizing_school_stuff_filtered",
- "re-shelving_library_books_filtered",
- "serving_hors_d_oeuvres_filtered",
- "putting_away_toys_filtered",
- "putting_away_Christmas_decorations_filtered",
- "putting_dishes_away_after_cleaning_filtered",
- "cleaning_out_drawers_filtered",
-]
-task = "assembling_gift_baskets"
-task_id = 0
-scene = "Rs_int"
-num_init = 0
-
-config_file = os.path.join(igibson.example_config_path, "behavior_vr.yaml")
-env_config = parse_config(config_file)
-env_config["scene_id"] = scene
-env_config["task"] = task
-env_config["task_id"] = task_id
-env_config["instance_id"] = num_init
-urdf_file = "{}_task_{}_{}_{}".format(scene, task, task_id, num_init)
-env_config["urdf_file"] = urdf_file
-env_config["online_sampling"] = False
-env_config["not_load_object_categories"] = ["ceilings"]
-
-
-env = iGibsonEnv(
- config_file=env_config,
- mode="headless",
- use_pb_gui=True,
-)
-
-print("success", env.task.initialized)
-embed()
-
-while True:
- env.simulator.step()
- success, sorted_conditions = env.task.check_success()
- print("TASK SUCCESS:", success)
- if not success:
- print("FAILED CONDITIONS:", sorted_conditions["unsatisfied"])
- else:
- pass
diff --git a/igibson/utils/data_utils/sampling_task/sampling_test.py b/igibson/utils/data_utils/sampling_task/sampling_test.py
deleted file mode 100644
index 316d979e5..000000000
--- a/igibson/utils/data_utils/sampling_task/sampling_test.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-
-from IPython import embed
-
-import igibson
-from igibson.envs.igibson_env import iGibsonEnv
-from igibson.simulator import Simulator
-from igibson.utils.utils import parse_config
-
-activity = "assembling_gift_baskets"
-scene_id = "Rs_int"
-
-config_file = os.path.join(igibson.example_config_path, "behavior_vr.yaml")
-env_config = parse_config(config_file)
-env_config["scene_id"] = scene_id
-env_config["task"] = activity
-env_config["task_id"] = 0
-env_config["online_sampling"] = True
-env_config["load_clutter"] = False
-env_config["not_load_object_categories"] = ["ceilings"]
-
-
-env = iGibsonEnv(
- config_file=env_config,
- mode="headless",
- use_pb_gui=True,
-)
-
-print("success", env.task.initialized)
-embed()
-
-while True:
- env.simulator.step()
- success, sorted_conditions = env.task.check_success()
- print("TASK SUCCESS:", success)
- if not success:
- print("FAILED CONDITIONS:", sorted_conditions["unsatisfied"])
- else:
- pass
diff --git a/igibson/utils/filters.py b/igibson/utils/filters.py
index 8a881a09e..a92beb365 100644
--- a/igibson/utils/filters.py
+++ b/igibson/utils/filters.py
@@ -17,12 +17,6 @@ def estimate(self, observation):
"""
raise NotImplementedError
- def reset(self):
- """
- Resets this filter. Default is no-op.
- """
- pass
-
class MovingAverageFilter(Filter):
"""
@@ -58,12 +52,6 @@ def estimate(self, observation):
return self.past_samples_sum / self.num_samples
- def reset(self):
- # Clear internal state
- self.past_samples = []
- self.past_samples_sum *= 0.0
- self.num_samples = 0
-
class ExponentialAverageFilter(Filter):
"""
diff --git a/igibson/utils/generate_trav_map.py b/igibson/utils/generate_trav_map.py
index a80c7143c..a6d78aec6 100644
--- a/igibson/utils/generate_trav_map.py
+++ b/igibson/utils/generate_trav_map.py
@@ -37,7 +37,7 @@ def generate_trav_map(scene_name, scene_source, load_full_scene=True):
if not load_full_scene:
scene._set_first_n_objects(3)
s = Simulator(mode="headless", image_width=512, image_height=512, device_idx=0)
- s.import_scene(scene)
+ s.import_ig_scene(scene)
if load_full_scene:
scene.open_all_doors()
diff --git a/igibson/utils/ig_logging.py b/igibson/utils/ig_logging.py
index e0c069eca..4bd297b61 100644
--- a/igibson/utils/ig_logging.py
+++ b/igibson/utils/ig_logging.py
@@ -71,13 +71,15 @@ def __init__(
if self.task:
self.obj_body_id_to_name = {}
for obj_name, obj in self.task.object_scope.items():
- self.obj_body_id_to_name[obj.get_body_id()] = obj_name
+ if hasattr(obj, "body_id"):
+ self.obj_body_id_to_name[obj.get_body_id()] = obj_name
self.obj_body_id_to_name_str = dump_config(self.obj_body_id_to_name)
if self.task and self.filter_objects:
self.tracked_objects = {}
for obj_name, obj in self.task.object_scope.items():
- self.tracked_objects[obj.get_body_id()] = obj
+ if hasattr(obj, "body_id"):
+ self.tracked_objects[obj.get_body_id()] = obj
else:
self.tracked_objects = [p.getBodyUniqueId(i) for i in range(p.getNumBodies())]
@@ -92,14 +94,14 @@ def __init__(
self.persistent_frame_count = 0
# Handle of HDF5 file
self.hf = None
- # Name path data - used to extract data from data map and save to hdf5
+ # Name path data - used to extract data from data map and save to hd5
self.name_path_data = []
self.generate_name_path_data()
# Create data map
self.create_data_map()
def generate_name_path_data(self):
- """Generates lists of name paths for resolution in hdf5 saving.
+ """Generates lists of name paths for resolution in hd5 saving.
Eg. ['vr', 'vr_camera', 'right_eye_view']."""
self.name_path_data.extend([["frame_data"]])
@@ -284,8 +286,6 @@ def set_up_data_storage(self):
self.hf.attrs["/metadata/activity_definition"] = self.task.activity_definition
self.hf.attrs["/metadata/scene_id"] = self.task.scene.scene_id
self.hf.attrs["/metadata/obj_body_id_to_name"] = self.obj_body_id_to_name_str
- self.hf.attrs["/metadata/urdf_file"] = self.task.scene.fname
-
# VR config YML is stored as a string in metadata
if self.store_vr:
self.hf.attrs["/metadata/vr_settings"] = self.sim.vr_settings.dump_vr_settings()
@@ -348,8 +348,8 @@ def write_vr_data_to_map(self):
if self.vr_robot:
forces = {
- "left_controller": p.getConstraintState(self.vr_robot.links["left_hand"].movement_cid),
- "right_controller": p.getConstraintState(self.vr_robot.links["right_hand"].movement_cid),
+ "left_controller": p.getConstraintState(self.vr_robot.parts["left_hand"].movement_cid),
+ "right_controller": p.getConstraintState(self.vr_robot.parts["right_hand"].movement_cid),
}
for device in ["hmd", "left_controller", "right_controller"]:
is_valid, trans, rot = self.sim.get_data_for_vr_device(device)
@@ -369,9 +369,9 @@ def write_vr_data_to_map(self):
else:
# Calculate model rotation and store
if device == "left_controller":
- base_rot = self.vr_robot.links["left_hand"].base_rot
+ base_rot = self.vr_robot.parts["left_hand"].base_rot
else:
- base_rot = self.vr_robot.links["right_hand"].base_rot
+ base_rot = self.vr_robot.parts["right_hand"].base_rot
controller_rot = rot
# Use dummy translation to calculation final rotation
final_rot = p.multiplyTransforms([0, 0, 0], controller_rot, [0, 0, 0], base_rot)[1]
@@ -395,10 +395,9 @@ def write_vr_data_to_map(self):
vr_pos_data.extend(list(self.sim.get_vr_pos()))
vr_pos_data.extend(list(self.sim.get_vr_offset()))
if self.vr_robot:
- vr_pos_data.extend(p.getConstraintState(self.vr_robot.links["body"].movement_cid))
+ vr_pos_data.extend(p.getConstraintState(self.vr_robot.parts["body"].movement_cid))
self.data_map["vr"]["vr_device_data"]["vr_position_data"][self.frame_counter, ...] = np.array(vr_pos_data)
- # On systems where eye tracking is not supported, we get dummy data and a guaranteed False validity reading
is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter = self.sim.get_eye_tracking_data()
if is_valid:
eye_data_list = [is_valid]
@@ -490,8 +489,8 @@ def process_frame(self):
-- updating pybullet data
-- incrementing frame counter by 1
"""
+ self.write_frame_data_to_map()
if self.store_vr:
- self.write_frame_data_to_map()
self.write_vr_data_to_map()
self.write_pybullet_data_to_map()
if self.task:
@@ -501,8 +500,9 @@ def process_frame(self):
self.frame_counter += 1
self.persistent_frame_count += 1
if self.frame_counter >= self.frames_before_write:
- # We have accumulated enough data, which we will write to hdf5
- self.write_to_hdf5()
+ self.frame_counter = 0
+ # We have accumulated enough data, which we will write to hd5
+ self.write_to_hd5()
def refresh_data_map(self):
"""Resets all values stored in self.data_map to the default sentinel value.
@@ -512,35 +512,27 @@ def refresh_data_map(self):
np_data = self.get_data_for_name_path(name_path)
np_data.fill(self.default_fill_sentinel)
- def write_to_hdf5(self):
- """Writes data stored in self.data_map to hdf5.
+ def write_to_hd5(self):
+ """Writes data stored in self.data_map to hd5.
The data is saved each time this function is called, so data
will be saved even if a Ctrl+C event interrupts the program."""
if self.log_status:
- print("----- Writing log data to hdf5 on frame: {0} -----".format(self.persistent_frame_count))
-
+ print("----- Writing log data to hd5 on frame: {0} -----".format(self.persistent_frame_count))
start_time = time.time()
+ for name_path in self.name_path_data:
+ curr_dset = self.hf["/".join(name_path)]
+ # Resize to accommodate new data
+ curr_dset.resize(curr_dset.shape[0] + self.frames_before_write, axis=0)
+ # Set last self.frames_before_write rows to numpy data from data map
+ curr_dset[-self.frames_before_write :, ...] = self.get_data_for_name_path(name_path)
- frames_to_write = self.persistent_frame_count - self.hf["frame_data"].shape[0]
- if frames_to_write > 0:
- for name_path in self.name_path_data:
- curr_dset = self.hf["/".join(name_path)]
- # Resize to accommodate new data
- curr_dset.resize(curr_dset.shape[0] + frames_to_write, axis=0)
- # Set the last frames_to_write rows to numpy data from data map
- curr_dset[-frames_to_write:, ...] = self.get_data_for_name_path(name_path)[:frames_to_write, ...]
-
- self.refresh_data_map()
- self.frame_counter = 0
-
+ self.refresh_data_map()
delta = time.time() - start_time
if self.profiling_mode:
print("Time to write: {0}".format(delta))
def end_log_session(self):
"""Closes hdf5 log file at end of logging session."""
- # Write the remaining data to hdf
- self.write_to_hdf5()
if self.log_status:
print("IG LOGGER INFO: Ending log writing session after {} frames".format(self.persistent_frame_count))
self.hf.close()
@@ -579,14 +571,6 @@ def has_metadata_attr(vr_log_path, attr_name):
f = h5py.File(vr_log_path, "r")
return attr_name in f.attrs
- @staticmethod
- def get_all_metadata_attrs(vr_log_path):
- """
- Returns a list of available metadata attributes
- """
- f = h5py.File(vr_log_path, "r")
- return f.attrs
-
@staticmethod
def read_metadata_attr(vr_log_path, attr_name):
"""
diff --git a/igibson/utils/motion_planning_wrapper.py b/igibson/utils/motion_planning_wrapper.py
index cbf4c91ed..a1fbf049c 100644
--- a/igibson/utils/motion_planning_wrapper.py
+++ b/igibson/utils/motion_planning_wrapper.py
@@ -23,7 +23,7 @@
from igibson.objects.visual_marker import VisualMarker
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
-from igibson.utils.utils import l2_distance, quatToXYZW, restoreState, rotate_vector_2d
+from igibson.utils.utils import l2_distance, quatToXYZW, rotate_vector_2d
class MotionPlanningWrapper(object):
@@ -38,7 +38,7 @@ def __init__(self, env=None, base_mp_algo="birrt", arm_mp_algo="birrt", optimize
self.env = env
assert "occupancy_grid" in self.env.output
# get planning related parameters from env
- self.robot_id = self.env.robots[0].get_body_id()
+ self.robot_id = self.env.robots[0].robot_ids[0]
# self.mesh_id = self.scene.mesh_body_id
# mesh id should not be used
self.map_size = self.env.scene.trav_map_original_size * self.env.scene.trav_map_default_resolution
@@ -68,7 +68,7 @@ def __init__(self, env=None, base_mp_algo="birrt", arm_mp_algo="birrt", optimize
self.marker = None
self.marker_direction = None
- if self.mode in ["gui_non_interactive", "gui_interactive"]:
+ if self.mode in ["gui", "iggui"]:
self.marker = VisualMarker(radius=0.04, rgba_color=[0, 0, 1, 1])
self.marker_direction = VisualMarker(
visual_shape=p.GEOM_CAPSULE,
@@ -77,8 +77,8 @@ def __init__(self, env=None, base_mp_algo="birrt", arm_mp_algo="birrt", optimize
initial_offset=[0, 0, -0.1],
rgba_color=[0, 0, 1, 1],
)
- self.env.simulator.import_object(self.marker)
- self.env.simulator.import_object(self.marker_direction)
+ self.env.simulator.import_object(self.marker, use_pbr=False)
+ self.env.simulator.import_object(self.marker_direction, use_pbr=False)
def set_marker_position(self, pos):
"""
@@ -232,7 +232,7 @@ def dry_run_base_plan(self, path):
:param path: base waypoints or None if no plan can be found
"""
if path is not None:
- if self.mode in ["gui_non_interactive", "gui_interactive"]:
+ if self.mode in ["gui", "iggui", "pbgui"]:
for way_point in path:
set_base_values_with_z(
self.robot_id, [way_point[0], way_point[1], way_point[2]], z=self.initial_height
@@ -351,12 +351,12 @@ def get_arm_joint_positions(self, arm_ik_goal):
# self.episode_metrics['arm_ik_time'] += time() - ik_start
# p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, True)
- restoreState(state_id)
+ p.restoreState(state_id)
p.removeState(state_id)
return arm_joint_positions
# p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, True)
- restoreState(state_id)
+ p.restoreState(state_id)
p.removeState(state_id)
# self.episode_metrics['arm_ik_time'] += time() - ik_start
return None
@@ -474,7 +474,7 @@ def plan_arm_motion(self, arm_joint_positions):
allow_collision_links=allow_collision_links,
)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, True)
- restoreState(state_id)
+ p.restoreState(state_id)
p.removeState(state_id)
return arm_path
@@ -486,7 +486,7 @@ def dry_run_arm_plan(self, arm_path):
"""
base_pose = get_base_values(self.robot_id)
if arm_path is not None:
- if self.mode in ["gui_non_interactive", "gui_interactive"]:
+ if self.mode in ["gui", "iggui", "pbgui"]:
for joint_way_point in arm_path:
set_joint_positions(self.robot_id, self.arm_joint_ids, joint_way_point)
set_base_values_with_z(self.robot_id, base_pose, z=self.initial_height)
@@ -568,7 +568,7 @@ def interact(self, push_point, push_direction):
self.simulator_step()
set_base_values_with_z(self.robot_id, base_pose, z=self.initial_height)
- if self.mode == "gui_interactive":
+ if self.mode in ["pbgui", "iggui", "gui"]:
sleep(0.02) # for visualization
def execute_arm_push(self, plan, hit_pos, hit_normal):
diff --git a/igibson/utils/muvr_utils.py b/igibson/utils/muvr_utils.py
index 7696ff50f..86b0b1ba4 100644
--- a/igibson/utils/muvr_utils.py
+++ b/igibson/utils/muvr_utils.py
@@ -11,6 +11,7 @@
from PodSixNet.Connection import ConnectionListener, connection
from PodSixNet.Server import Server
+from igibson.render.mesh_renderer.mesh_renderer_cpu import Instance, InstanceGroup
from igibson.utils.vr_utils import VrData
# An FPS cap is needed to ensure that the client and server don't fall too far out of sync
@@ -46,19 +47,25 @@ def ingest_frame_data(self):
self.latest_frame_data = copy.deepcopy(self.frame_data)
for instance in self.renderer.get_instances():
data = self.latest_frame_data[instance.pybullet_uuid]
- poses_trans = []
- poses_rot = []
- data_trans = data[0]
- data_rot = data[1]
- num_links = len(data_trans)
- for i in range(num_links):
- next_trans = np.array(data_trans[i])
- next_rot = np.array(data_rot[i])
- poses_trans.append(np.ascontiguousarray(next_trans))
- poses_rot.append(np.ascontiguousarray(next_rot))
-
- instance.poses_trans = poses_trans
- instance.poses_rot = poses_rot
+ if isinstance(instance, Instance):
+ trans = np.array(data[0])
+ rot = np.array(data[1])
+ instance.pose_trans = trans
+ instance.pose_rot = rot
+ elif isinstance(instance, InstanceGroup):
+ poses_trans = []
+ poses_rot = []
+ data_trans = data[0]
+ data_rot = data[1]
+ num_links = len(data_trans)
+ for i in range(num_links):
+ next_trans = np.array(data_trans[i])
+ next_rot = np.array(data_rot[i])
+ poses_trans.append(np.ascontiguousarray(next_trans))
+ poses_rot.append(np.ascontiguousarray(next_rot))
+
+ instance.poses_trans = poses_trans
+ instance.poses_rot = poses_rot
def client_step(self):
self.s.viewer.update()
@@ -185,13 +192,18 @@ def gen_frame_data(self):
# Loop through all instances and get pos and rot data
# We convert numpy arrays into lists so they can be serialized and sent over the network
# Lists can also be easily reconstructed back into numpy arrays on the client side
- poses = []
- rots = []
- for pose in instance.poses_trans:
- poses.append(pose.tolist())
- for rot in instance.poses_rot:
- rots.append(rot.tolist())
- self.frame_data[instance.pybullet_uuid] = [poses, rots]
+ if isinstance(instance, Instance):
+ pose = instance.pose_trans.tolist()
+ rot = instance.pose_rot.tolist()
+ self.frame_data[instance.pybullet_uuid] = [pose, rot]
+ elif isinstance(instance, InstanceGroup):
+ poses = []
+ rots = []
+ for pose in instance.poses_trans:
+ poses.append(pose.tolist())
+ for rot in instance.poses_rot:
+ rots.append(rot.tolist())
+ self.frame_data[instance.pybullet_uuid] = [poses, rots]
def send_frame_data(self):
if self.client:
diff --git a/igibson/utils/python_utils.py b/igibson/utils/python_utils.py
deleted file mode 100644
index fb3b3641c..000000000
--- a/igibson/utils/python_utils.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""
-A set of utility functions for general python usage
-"""
-import inspect
-from copy import deepcopy
-
-
-def merge_nested_dicts(base_dict, extra_dict, verbose=False):
- """
- Iteratively updates @base_dict with values from @extra_dict. Note: This generates a new dictionary!
- Args:
- base_dict (dict): Nested base dictionary, which should be updated with all values from @extra_dict
- extra_dict (dict): Nested extra dictionary, whose values will overwrite corresponding ones in @base_dict
- verbose (bool): If True, will print when keys are mismatched
- Returns:
- dict: Updated dictionary
- """
- # Loop through all keys in @extra_dict and update the corresponding values in @base_dict
- base_dict = deepcopy(base_dict)
- for k, v in extra_dict.items():
- if k not in base_dict:
- base_dict[k] = v
- else:
- if isinstance(v, dict):
- if isinstance(base_dict[k], dict):
- base_dict[k] = merge_nested_dicts(base_dict[k], v)
- else:
- if base_dict[k] != v and verbose:
- print(f"Different values for key {k}: {base_dict[k]}, {v}\n")
- base_dict[k] = v
- else:
- if base_dict[k] != v and verbose:
- print(f"Different values for key {k}: {base_dict[k]}, {v}\n")
- base_dict[k] = v
-
- # Return new dict
- return base_dict
-
-
-def get_class_init_kwargs(cls):
- """
- Helper function to return a list of all valid keyword arguments (excluding "self") for the given @cls class.
- Args:
- cls (object): Class from which to grab __init__ kwargs
- Returns:
- list: All keyword arguments (excluding "self") specified by @cls __init__ constructor method
- """
- return list(inspect.signature(cls.__init__).parameters.keys())[1:]
-
-
-def extract_subset_dict(dic, keys, copy=False):
- """
- Helper function to extract a subset of dictionary key-values from a current dictionary. Optionally (deep)copies
- the values extracted from the original @dic if @copy is True.
- Args:
- dic (dict): Dictionary containing multiple key-values
- keys (Iterable): Specific keys to extract from @dic. If the key doesn't exist in @dic, then the key is skipped
- copy (bool): If True, will deepcopy all values corresponding to the specified @keys
- Returns:
- dict: Extracted subset dictionary containing only the specified @keys and their corresponding values
- """
- subset = {k: dic[k] for k in keys if k in dic}
- return deepcopy(subset) if copy else subset
-
-
-def extract_class_init_kwargs_from_dict(cls, dic, copy=False):
- """
- Helper function to return a dictionary of key-values that specifically correspond to @cls class's __init__
- constructor method, from @dic which may or may not contain additional, irrelevant kwargs.
- Note that @dic may possibly be missing certain kwargs as specified by cls.__init__. No error will be raised.
- Args:
- cls (object): Class from which to grab __init__ kwargs that will be be used as filtering keys for @dic
- dic (dict): Dictionary containing multiple key-values
- copy (bool): If True, will deepcopy all values corresponding to the specified @keys
- Returns:
- dict: Extracted subset dictionary possibly containing only the specified keys from cls.__init__ and their
- corresponding values
- """
- # extract only relevant kwargs for this specific backbone
- return extract_subset_dict(
- dic=dic,
- keys=get_class_init_kwargs(cls),
- copy=copy,
- )
-
-
-def assert_valid_key(key, valid_keys, name=None):
- """
- Helper function that asserts that @key is in dictionary @valid_keys keys. If not, it will raise an error.
-
- :param key: Any, key to check for in dictionary @dic's keys
- :param valid_keys: Iterable, contains keys should be checked with @key
- :param name: str or None, if specified, is the name associated with the key that will be printed out if the
- key is not found. If None, default is "value"
- """
- if name is None:
- name = "value"
- assert key in valid_keys, "Invalid {} received! Valid options are: {}, got: {}".format(name, valid_keys, key)
diff --git a/igibson/utils/sampling_utils.py b/igibson/utils/sampling_utils.py
index 7eda4b63d..16958160f 100644
--- a/igibson/utils/sampling_utils.py
+++ b/igibson/utils/sampling_utils.py
@@ -3,13 +3,11 @@
import numpy as np
import pybullet as p
-import trimesh
from scipy.spatial.transform import Rotation
from scipy.stats import truncnorm
import igibson
from igibson.objects.visual_marker import VisualMarker
-from igibson.utils import utils
_DEFAULT_AABB_OFFSET = 0.1
_PARALLEL_RAY_NORMAL_ANGLE_TOLERANCE = 1.0 # Around 60 degrees
@@ -52,10 +50,9 @@ def draw_debug_markers(hit_positions):
color = np.concatenate([np.random.rand(3), [1]])
for vec in hit_positions:
m = VisualMarker(rgba_color=color, radius=0.001, initial_offset=vec)
- # This feature is not usable out-of-the-box since this function (or its callers) don't have access to a
- # simulator instance to be able to load this marker into. You can work around this by creating a variable
- # inside the simulator.py file called SIMULATOR and making it refer to the global simulator instance.
- # simulator.SIMULATOR.import_object(m)
+ # Normally the simulator should be the one to import this object,
+ # but since this code path is debug-only, loading it directly here is acceptable.
+ m.load()
def get_parallel_rays(
@@ -107,7 +104,7 @@ def get_parallel_rays(
return sources, destinations, ray_grid
-def sample_origin_positions(mins, maxes, count, bimodal_mean_fraction, bimodal_stdev_fraction, axis_probabilities):
+def sample_origin_positions(mins, maxes, count, bimodal_mean_fraction, bimodal_stdev_fraction, axis_probabilities, random_seed_number=0):
"""
Sample ray casting origin positions with a given distribution.
@@ -124,10 +121,13 @@ def sample_origin_positions(mins, maxes, count, bimodal_mean_fraction, bimodal_s
:param bimodal_stdev_fraction: float, the standard deviation of one side of the symmetric bimodal distribution as a
fraction of the min-max range.
:param axis_probabilities: Array of shape (3, ), the probability of ray casting along each axis.
+ :param random_seed_number: int, a number to seed np.random.seed when sampling, so that samples are produced in a
+ deterministic fashion
:return: List of (ray cast axis index, bool whether the axis was sampled from the top side, [x, y, z]) tuples.
"""
assert len(mins.shape) == 1
assert mins.shape == maxes.shape
+ np.random.seed(seed=random_seed_number)
results = []
for i in range(count):
@@ -173,6 +173,7 @@ def sample_cuboid_on_object(
max_angle_with_z_axis=_DEFAULT_MAX_ANGLE_WITH_Z_AXIS,
hit_to_plane_threshold=_DEFAULT_HIT_TO_PLANE_THRESHOLD,
refuse_downwards=False,
+ random_seed_number=0
):
"""
Samples points on an object's surface using ray casting.
@@ -198,13 +199,22 @@ def sample_cuboid_on_object(
:param hit_to_plane_threshold: float, how far any given hit position can be from the least-squares fit plane to
all of the hit positions before the sample is rejected.
:param refuse_downwards: bool, whether downward-facing hits (as defined by max_angle_with_z_axis) are allowed.
+ :param random_seed_number: int, a number to seed np.random.seed when sampling, so that samples are produced in a
+ deterministic fashion
:return: List of num_samples elements where each element is a tuple in the form of
(cuboid_centroid, cuboid_up_vector, cuboid_rotation, {refusal_reason: [refusal_details...]}). Cuboid positions
are set to None when no successful sampling happens within the max number of attempts. Refusal details are only
filled if the debug_sampling flag is globally set to True.
"""
- bbox_center, bbox_orn, bbox_bf_extent, _ = obj.get_base_aligned_bounding_box(xy_aligned=True, fallback_to_aabb=True)
- half_extent_with_offset = (bbox_bf_extent / 2) + aabb_offset
+ # This is imported here to avoid a circular import with object_states.
+ from igibson.object_states import AABB
+
+ aabb = obj.states[AABB].get_value()
+ aabb_min = np.array(aabb[0])
+ aabb_max = np.array(aabb[1])
+
+ sampling_aabb_min = aabb_min - aabb_offset
+ sampling_aabb_max = aabb_max + aabb_offset
body_id = obj.get_body_id()
@@ -218,15 +228,14 @@ def sample_cuboid_on_object(
for i in range(num_samples):
# Sample the starting positions in advance.
- # TODO: Narrow down the sampling domain so that we don't sample scenarios where the center is in-domain but the
- # full extent isn't. Currently a lot of samples are being wasted because of this.
samples = sample_origin_positions(
- -half_extent_with_offset,
- half_extent_with_offset,
+ sampling_aabb_min,
+ sampling_aabb_max,
max_sampling_attempts,
bimodal_mean_fraction,
bimodal_stdev_fraction,
axis_probabilities,
+ random_seed_number
)
refusal_reasons = results[i][4]
@@ -234,56 +243,29 @@ def sample_cuboid_on_object(
# Try each sampled position in the AABB.
for axis, is_top, start_pos in samples:
# Compute the ray's destination using the sampling & AABB information.
- point_on_face = compute_ray_destination(
- axis, is_top, start_pos, -half_extent_with_offset, half_extent_with_offset
- )
+ point_on_face = compute_ray_destination(axis, is_top, start_pos, aabb_min, aabb_max)
# If we have a list of offset distances, pick the distance for this particular sample we're getting.
this_cuboid_dimensions = cuboid_dimensions if cuboid_dimensions.ndim == 1 else cuboid_dimensions[i]
# Obtain the parallel rays using the direction sampling method.
- bbf_sources, bbf_destinations, grid = get_parallel_rays(
- start_pos, point_on_face, this_cuboid_dimensions[:2] / 2.0
- )
-
- # Transform the sources, destinations and grid to the world frame coordinates.
- to_wf_transform = utils.quat_pos_to_mat(bbox_center, bbox_orn)
- sources = trimesh.transformations.transform_points(bbf_sources, to_wf_transform)
- destinations = trimesh.transformations.transform_points(bbf_destinations, to_wf_transform)
+ sources, destinations, grid = get_parallel_rays(start_pos, point_on_face, this_cuboid_dimensions[:2] / 2.0)
# Time to cast the rays.
cast_results = p.rayTestBatch(rayFromPositions=sources, rayToPositions=destinations, numThreads=0)
- # for ray_start, ray_end in zip(sources, destinations):
- # p.addUserDebugLine(ray_start, ray_end, lineWidth=4)
-
- threshold, hits = check_rays_hit_object(cast_results, body_id, refusal_reasons["missed_object"], 0.6)
- if not threshold:
- continue
-
- filtered_cast_results = []
- center_idx = int(len(cast_results) / 2)
- filtered_center_idx = None
- center_hit = False
- for idx, hit in enumerate(hits):
- if hit:
- filtered_cast_results.append(cast_results[idx])
- if idx == center_idx:
- center_hit = True
- filtered_center_idx = len(filtered_cast_results) - 1
-
- # Only consider objects whose center idx has a ray hit
- if not center_hit:
+ # Check that all rays hit the object.
+ if not check_rays_hit_object(cast_results, body_id, refusal_reasons["missed_object"]):
continue
# Process the hit positions and normals.
- hit_positions = np.array([ray_res[3] for ray_res in filtered_cast_results])
- hit_normals = np.array([ray_res[4] for ray_res in filtered_cast_results])
+ hit_positions = np.array([ray_res[3] for ray_res in cast_results])
+ hit_normals = np.array([ray_res[4] for ray_res in cast_results])
hit_normals /= np.linalg.norm(hit_normals, axis=1)[:, np.newaxis]
- assert filtered_center_idx
- hit_link = filtered_cast_results[filtered_center_idx][1]
- center_hit_normal = hit_normals[filtered_center_idx]
+ center_idx = int(len(cast_results) / 2)
+ hit_link = cast_results[center_idx][1]
+ center_hit_normal = hit_normals[center_idx]
# Reject anything facing more than 45deg downwards if requested.
if refuse_downwards:
@@ -301,10 +283,17 @@ def sample_cuboid_on_object(
# The fit_plane normal can be facing either direction on the normal axis, but we want it to face away from
# the object for purposes of normal checking and padding. To do this:
- # We get a vector from the centroid towards the center ray source, and flip the plane normal to match it.
- # The cosine has positive sign if the two vectors are similar and a negative one if not.
- plane_to_source = sources[center_idx] - plane_centroid
- plane_normal *= np.sign(np.dot(plane_to_source, plane_normal))
+ # First, we get the multiplier we'd need to multiply by to get this normal to face in the positive
+ # direction of our axis. This is simply the sign of our vector's component in our axis.
+ facing_multiplier = np.sign(plane_normal[axis])
+
+ # Then we invert the multiplier based on whether we want it to face in the positive or negative direction
+ # of the axis. This is determined by whether or not is_top is True, e.g. the sampler intended this as a
+ # positive-facing or negative-facing sample.
+ facing_multiplier *= 1 if is_top else -1
+
+ # Finally, we multiply the entire normal vector by the multiplier to make it face the correct direction.
+ plane_normal *= facing_multiplier
# Check that the plane normal is similar to the hit normal
if not check_normal_similarity(
@@ -315,12 +304,11 @@ def sample_cuboid_on_object(
# Check that the points are all within some acceptable distance of the plane.
distances = get_distance_to_plane(hit_positions, plane_centroid, plane_normal)
if np.any(distances > hit_to_plane_threshold):
- if igibson.debug_sampling:
+ if True: #igibson.debug_sampling:
refusal_reasons["dist_to_plane"].append("distances to plane: %r" % (distances,))
continue
# Get projection of the base onto the plane, fit a rotation, and compute the new center hit / corners.
- hit_positions = np.array([ray_res[3] for ray_res in cast_results])
projected_hits = get_projection_onto_plane(hit_positions, plane_centroid, plane_normal)
padding = _DEFAULT_CUBOID_BOTTOM_PADDING * plane_normal
projected_hits += padding
@@ -367,6 +355,190 @@ def sample_cuboid_on_object(
return results
+def check_sample_cuboid_on_object(
+ sampling_results,
+ obj,
+ num_samples,
+ cuboid_dimensions,
+ bimodal_mean_fraction,
+ bimodal_stdev_fraction,
+ axis_probabilities,
+ undo_padding=False,
+ aabb_offset=_DEFAULT_AABB_OFFSET,
+ max_sampling_attempts=_DEFAULT_MAX_SAMPLING_ATTEMPTS,
+ max_angle_with_z_axis=_DEFAULT_MAX_ANGLE_WITH_Z_AXIS,
+ hit_to_plane_threshold=_DEFAULT_HIT_TO_PLANE_THRESHOLD,
+ refuse_downwards=False,
+):
+ """
+ Checks whether a previously sampled cuboid placement on an object's surface is still valid, using ray casting.
+
+ :param obj: The object to sample points on.
+ :param num_samples: int, the number of points to try to sample.
+ :param cuboid_dimensions: Float sequence of len 3, the size of the empty cuboid we are trying to sample. Can also
+ provide a list of cuboid dimension triplets, in which case each i'th sample will be sampled using the i'th triplet.
+ :param bimodal_mean_fraction: float, the mean of one side of the symmetric bimodal distribution as a fraction of the
+ min-max range.
+ :param bimodal_stdev_fraction: float, the standard deviation of one side of the symmetric bimodal distribution as a
+ fraction of the min-max range.
+ :param axis_probabilities: Array of shape (3, ), the probability of ray casting along each axis.
+ :param undo_padding: bool. Whether the bottom padding that's applied to the cuboid should be removed before return.
+ Useful when the cuboid needs to be flush with the surface for whatever reason. Note that the padding will still
+ be applied initially (since it's not possible to do the cuboid emptiness check without doing this - otherwise
+ the rays will hit the sampled-on object), so the emptiness check still checks a padded cuboid. This flag will
+ simply make the sampler undo the padding prior to returning.
+ :param aabb_offset: float, padding for AABB to make sure rays start outside the actual object.
+ :param max_sampling_attempts: int, how many times sampling will be attempted for each requested point.
+ :param max_angle_with_z_axis: float, maximum angle between hit normal and positive Z axis allowed. Can be used to
+ disallow downward-facing hits when refuse_downwards=True.
+ :param hit_to_plane_threshold: float, how far any given hit position can be from the least-squares fit plane to
+ all of the hit positions before the sample is rejected.
+ :param refuse_downwards: bool, whether downward-facing hits (as defined by max_angle_with_z_axis) are allowed.
+ :return: bool, True if the placement described by @sampling_results passes all checks (rays hit the object,
+ hit normals are similar to each other and to the fitted plane, hits lie close to that plane, and the cuboid
+ volume is empty), False otherwise. Unlike sample_cuboid_on_object, the cuboid poses themselves are not
+ returned; refusal details are only collected internally for debugging.
+ """
+ # This is imported here to avoid a circular import with object_states.
+ from igibson.object_states import AABB
+
+ aabb = obj.states[AABB].get_value()
+ aabb_min = np.array(aabb[0])
+ aabb_max = np.array(aabb[1])
+
+ sampling_aabb_min = aabb_min - aabb_offset
+ sampling_aabb_max = aabb_max + aabb_offset
+
+ body_id = obj.get_body_id()
+
+ cuboid_dimensions = np.array(cuboid_dimensions)
+ assert cuboid_dimensions.ndim <= 2
+ assert cuboid_dimensions.shape[-1] == 3, "Cuboid dimensions need to contain all three dimensions."
+ if cuboid_dimensions.ndim == 2:
+ assert cuboid_dimensions.shape[0] == num_samples, "Need as many offsets as samples requested."
+
+ results = [(None, None, None, None, defaultdict(list)) for _ in range(num_samples)]
+
+ for i in range(num_samples):
+ # Use the precomputed sampling result as the (axis, is_top, start position, rotation) sample.
+ samples = [[2, True, sampling_results[0][0], sampling_results[0][2]]]
+
+ refusal_reasons = results[i][4]
+
+ # Try each sampled position in the AABB.
+ for axis, is_top, start_pos, rot_quat in samples:
+ # Compute the ray's destination using the sampling & AABB information.
+ point_on_face = compute_ray_destination(axis, is_top, start_pos, aabb_min, aabb_max)
+
+ # If we have a list of offset distances, pick the distance for this particular sample we're getting.
+ this_cuboid_dimensions = cuboid_dimensions if cuboid_dimensions.ndim == 1 else cuboid_dimensions[i]
+
+ # Obtain the parallel rays using the direction sampling method.
+ sources, destinations, grid = get_parallel_rays(start_pos, point_on_face, this_cuboid_dimensions[:2] / 2.0)
+
+ # Time to cast the rays.
+ cast_results = p.rayTestBatch(rayFromPositions=sources, rayToPositions=destinations, numThreads=0)
+
+ # Check that all rays hit the object.
+ if not check_rays_hit_object(cast_results, body_id, refusal_reasons["missed_object"]):
+ continue
+
+ # Process the hit positions and normals.
+ hit_positions = np.array([ray_res[3] for ray_res in cast_results])
+ hit_normals = np.array([ray_res[4] for ray_res in cast_results])
+ hit_normals /= np.linalg.norm(hit_normals, axis=1)[:, np.newaxis]
+
+ center_idx = int(len(cast_results) / 2)
+ hit_link = cast_results[center_idx][1]
+ center_hit_normal = hit_normals[center_idx]
+
+ # Reject anything facing more than 45deg downwards if requested.
+ if refuse_downwards:
+ if not check_hit_max_angle_from_z_axis(
+ center_hit_normal, max_angle_with_z_axis, refusal_reasons["downward_normal"]
+ ):
+ continue
+
+ # Check that none of the parallel rays' hit normal differs from center ray by more than threshold.
+ if not check_normal_similarity(center_hit_normal, hit_normals, refusal_reasons["hit_normal_similarity"]):
+ continue
+
+ # Fit a plane to the points.
+ plane_centroid, plane_normal = fit_plane(hit_positions)
+
+ # The fit_plane normal can be facing either direction on the normal axis, but we want it to face away from
+ # the object for purposes of normal checking and padding. To do this:
+ # First, we get the multiplier we'd need to multiply by to get this normal to face in the positive
+ # direction of our axis. This is simply the sign of our vector's component in our axis.
+ facing_multiplier = np.sign(plane_normal[axis])
+
+ # Then we invert the multiplier based on whether we want it to face in the positive or negative direction
+ # of the axis. This is determined by whether or not is_top is True, e.g. the sampler intended this as a
+ # positive-facing or negative-facing sample.
+ facing_multiplier *= 1 if is_top else -1
+
+ # Finally, we multiply the entire normal vector by the multiplier to make it face the correct direction.
+ plane_normal *= facing_multiplier
+
+ # Check that the plane normal is similar to the hit normal
+ if not check_normal_similarity(
+ center_hit_normal, plane_normal[None, :], refusal_reasons["plane_normal_similarity"]
+ ):
+ continue
+
+ # Check that the points are all within some acceptable distance of the plane.
+ distances = get_distance_to_plane(hit_positions, plane_centroid, plane_normal)
+ if np.any(distances > hit_to_plane_threshold):
+ if True: #igibson.debug_sampling:
+ refusal_reasons["dist_to_plane"].append("distances to plane: %r" % (distances,))
+ continue
+
+ # Get projection of the base onto the plane, fit a rotation, and compute the new center hit / corners.
+ projected_hits = get_projection_onto_plane(hit_positions, plane_centroid, plane_normal)
+ padding = _DEFAULT_CUBOID_BOTTOM_PADDING * plane_normal
+ projected_hits += padding
+ center_projected_hit = projected_hits[center_idx]
+ cuboid_centroid = center_projected_hit + plane_normal * this_cuboid_dimensions[2] / 2.0
+ rotation = Rotation.from_quat(rot_quat) #compute_rotation_from_grid_sample(grid, projected_hits, cuboid_centroid, this_cuboid_dimensions)
+ corner_positions = cuboid_centroid[None, :] + (
+ rotation.apply(
+ 0.5
+ * this_cuboid_dimensions
+ * np.array(
+ [
+ [1, 1, -1],
+ [-1, 1, -1],
+ [-1, -1, -1],
+ [1, -1, -1],
+ ]
+ )
+ )
+ )
+
+ # Now we use the cuboid's diagonals to check that the cuboid is actually empty.
+ if not check_cuboid_empty(
+ plane_normal, corner_positions, refusal_reasons["cuboid_not_empty"], this_cuboid_dimensions
+ ):
+ continue
+
+ if undo_padding:
+ cuboid_centroid -= padding
+
+ # We've found a nice attachment point. Continue onto next point to sample.
+ results[i] = (cuboid_centroid, plane_normal, rotation.as_quat(), hit_link, refusal_reasons)
+ break
+
+ if igibson.debug_sampling:
+ print("Sampling rejection reasons:")
+ counter = Counter()
+
+ for instance in results:
+ for reason, refusals in instance[-1].items():
+ counter[reason] += len(refusals)
+
+ print("\n".join("%s: %d" % pair for pair in counter.items()))
+
+ return results[0][0] is not None
def compute_rotation_from_grid_sample(two_d_grid, hit_positions, cuboid_centroid, this_cuboid_dimensions):
# TODO: Figure out if the normalization has any advantages.
@@ -403,7 +575,7 @@ def check_normal_similarity(center_hit_normal, hit_normals, refusal_log):
parallel_hit_normal_angles_to_hit_normal < _PARALLEL_RAY_NORMAL_ANGLE_TOLERANCE
)
if not all_rays_hit_with_similar_normal:
- if igibson.debug_sampling:
+ if True: #igibson.debug_sampling:
refusal_log.append("angles %r" % (np.rad2deg(parallel_hit_normal_angles_to_hit_normal),))
return False
@@ -411,22 +583,21 @@ def check_normal_similarity(center_hit_normal, hit_normals, refusal_log):
return True
-def check_rays_hit_object(cast_results, body_id, refusal_log, threshold=1.0):
+def check_rays_hit_object(cast_results, body_id, refusal_log):
hit_body_ids = [ray_res[0] for ray_res in cast_results]
- ray_hits = list(hit_body_id == body_id for hit_body_id in hit_body_ids)
- if not (sum(ray_hits) / len(hit_body_ids)) >= threshold:
- if igibson.debug_sampling:
+ if not all(hit_body_id == body_id for hit_body_id in hit_body_ids):
+ if True: #igibson.debug_sampling:
refusal_log.append("hits %r" % hit_body_ids)
- return False, ray_hits
+ return False
- return True, ray_hits
+ return True
def check_hit_max_angle_from_z_axis(hit_normal, max_angle_with_z_axis, refusal_log):
hit_angle_with_z = np.arccos(np.clip(np.dot(hit_normal, np.array([0, 0, 1])), -1.0, 1.0))
if hit_angle_with_z > max_angle_with_z_axis:
- if igibson.debug_sampling:
+ if True: #igibson.debug_sampling:
refusal_log.append("normal %r" % hit_normal)
return False
@@ -466,9 +637,6 @@ def compute_ray_destination(axis, is_top, start_pos, aabb_min, aabb_max):
def check_cuboid_empty(hit_normal, bottom_corner_positions, refusal_log, this_cuboid_dimensions):
- if igibson.debug_sampling:
- draw_debug_markers(bottom_corner_positions)
-
# Compute top corners.
top_corner_positions = bottom_corner_positions + hit_normal * this_cuboid_dimensions[2]
@@ -486,7 +654,7 @@ def check_cuboid_empty(hit_normal, bottom_corner_positions, refusal_log, this_cu
rayFromPositions=all_pairs[:, 0, :], rayToPositions=all_pairs[:, 1, :], numThreads=0
)
if not all(ray[0] == -1 for ray in check_cast_results):
- if igibson.debug_sampling:
+ if True: #igibson.debug_sampling:
refusal_log.append("check ray info: %r" % (check_cast_results,))
return False
diff --git a/igibson/utils/semantics_utils.py b/igibson/utils/semantics_utils.py
index cf479cbd4..2fd0b0318 100644
--- a/igibson/utils/semantics_utils.py
+++ b/igibson/utils/semantics_utils.py
@@ -18,6 +18,3 @@ def get_class_name_to_class_id(starting_class_id=SemanticClass.SCENE_OBJS + 1):
class_name_to_class_id[line.strip()] = starting_class_id
starting_class_id += 1
return class_name_to_class_id
-
-
-CLASS_NAME_TO_CLASS_ID = get_class_name_to_class_id()
diff --git a/igibson/utils/urdf_utils.py b/igibson/utils/urdf_utils.py
index 07081b57d..46a47e058 100644
--- a/igibson/utils/urdf_utils.py
+++ b/igibson/utils/urdf_utils.py
@@ -223,8 +223,8 @@ def save_urdfs_without_floating_joints(tree, main_body_is_fixed, file_prefix):
extended_splitted_dict[esd][3],
transformation,
)
- logging.info("Number of splits: " + str(len(extended_splitted_dict)))
- logging.info("Instantiating scene into the following urdfs:")
+ # logging.info("Number of splits: " + str(len(extended_splitted_dict)))
+ # logging.info("Instantiating scene into the following urdfs:")
main_link_name = get_base_link_name(tree)
@@ -254,7 +254,7 @@ def save_urdfs_without_floating_joints(tree, main_body_is_fixed, file_prefix):
transformation = extended_splitted_dict[esd_key][4]
urdfs_no_floating[esd_key] = (urdf_file_name, transformation, is_fixed, is_main_body)
xml_tree_parent.write(urdf_file_name, xml_declaration=True)
- logging.info(urdf_file_name)
+ # logging.info(urdf_file_name)
# There should be exactly one main body
assert np.sum([val[3] for val in urdfs_no_floating.values()]) == 1
diff --git a/igibson/utils/utils.py b/igibson/utils/utils.py
index 615bb0027..740cfece8 100644
--- a/igibson/utils/utils.py
+++ b/igibson/utils/utils.py
@@ -1,8 +1,8 @@
import collections
import os
import random
-
import numpy as np
+
import pybullet as p
import scipy
import yaml
@@ -11,11 +11,8 @@
from scipy.spatial.transform import Rotation as R
from transforms3d import quaternions
-# The function to retrieve the rotation matrix changed from as_dcm to as_matrix in version 1.4
-# We will use the version number for backcompatibility
scipy_version = version.parse(scipy.version.version)
-
# File I/O related
@@ -179,18 +176,18 @@ def normalizeListVec(v):
return v
-# Quat(xyzw)
+# Quat(wxyz)
def quat_pos_to_mat(pos, quat):
"""Convert position and quaternion to transformation matrix"""
+ r_w, r_x, r_y, r_z = quat
+ # print("quat", r_w, r_x, r_y, r_z)
mat = np.eye(4)
- mat[:3, :3] = R.from_quat(quat).as_matrix()
+ mat[:3, :3] = quaternions.quat2mat([r_w, r_x, r_y, r_z])
mat[:3, -1] = pos
+ # Returns a 4x4 homogeneous transformation matrix
return mat
-# Texture related
-
-
def transform_texture(input_filename, output_filename, mixture_weight=0, mixture_color=(0, 0, 0)):
img = np.array(Image.open(input_filename))
img = img * (1 - mixture_weight) + np.array(list(mixture_color))[None, None, :] * mixture_weight
@@ -205,12 +202,8 @@ def brighten_texture(input_filename, output_filename, brightness=1):
Image.fromarray(img).save(output_filename)
-# Other
-
-
def restoreState(*args, **kwargs):
"""Restore to a given pybullet state, with a mitigation for a known sleep state restore bug.
-
When the pybullet state is restored, the object's wake zone (the volume around the object where
if any other object enters, the object should be woken up) does not get reset correctly,
causing weird bugs around asleep objects. This function mitigates the issue by forcing the
@@ -239,3 +232,23 @@ def let_user_pick(options, print_intro=True, random_selection=False):
except:
pass
return None
+
+# Helper to dynamically modify the wbm3_modifiable_full_obs.yaml config file
+def modify_config_file(config_file, task_name, scene_id, randomize_init_state, seed):
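+ """Copy config_file with the task/scene/online-sampling placeholders filled in; return the new file's path."""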
+ # Read in the file
+ with open(config_file, 'r') as file :
+ filedata = file.read()
+
+ # Replace the target string
+ filedata = filedata.replace('task_placeholder', task_name)
+ filedata = filedata.replace('scene_placeholder', scene_id)
+ filedata = filedata.replace('online_sampling_placeholder', str(randomize_init_state))
+ #print(filedata)
+
+ config_file = config_file + "_tmp_" + task_name + str(scene_id) + str(seed)
+
+ # Write the file out again
+ with open(config_file, 'w') as file:
+ file.write(filedata)
+
+ return config_file
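+
+# Example usage (hypothetical values; the YAML template must contain the
+# task_placeholder / scene_placeholder / online_sampling_placeholder strings):
+# tmp_config = modify_config_file("wbm3_modifiable_full_obs.yaml",
+#     "cleaning_out_drawers", "Benevolence_1_int", randomize_init_state=True, seed=0)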
\ No newline at end of file
diff --git a/igibson/utils/vision_utils.py b/igibson/utils/vision_utils.py
index 574ab7c2a..a316c7c99 100644
--- a/igibson/utils/vision_utils.py
+++ b/igibson/utils/vision_utils.py
@@ -1,7 +1,5 @@
-import colorsys
import random
-import numpy as np
from PIL import Image
try:
@@ -53,39 +51,3 @@ def __call__(self, img):
return img.resize((ow, oh), self.interpolation)
else:
raise NotImplementedError()
-
-
-def randomize_colors(N, bright=True):
- """
- Modified from https://github.com/matterport/Mask_RCNN/blob/master/mrcnn/visualize.py#L59
- Generate random colors.
- To get visually distinct colors, generate them in HSV space then
- convert to RGB.
- """
- brightness = 1.0 if bright else 0.5
- hsv = [(1.0 * i / N, 1, brightness) for i in range(N)]
- colors = np.array(list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)))
- rstate = np.random.RandomState(seed=20)
- np.random.shuffle(colors)
- colors[0] = [0, 0, 0] # First color is black
- return colors
-
-
-def segmentation_to_rgb(seg_im, N, colors=None):
- """
- Helper function to visualize segmentations as RGB frames.
- NOTE: assumes that geom IDs go up to N at most - if not,
- multiple geoms might be assigned to the same color.
- """
- # ensure all values lie within [0, N]
- seg_im = np.mod(seg_im, N)
-
- if colors is None:
- use_colors = randomize_colors(N=N, bright=True)
- else:
- use_colors = colors
-
- if N <= 256:
- return (255.0 * use_colors[seg_im]).astype(np.uint8)
- else:
- return (use_colors[seg_im]).astype(np.float)
diff --git a/mac_behavior_installation.md b/mac_behavior_installation.md
new file mode 100644
index 000000000..142b8adc9
--- /dev/null
+++ b/mac_behavior_installation.md
@@ -0,0 +1,28 @@
+## Instructions for installing BEHAVIOR on Mac
+
+1. `pip install cmake`
+2. `git clone git@github.com:Learning-and-Intelligent-Systems/iGibson.git --recursive`
+3. `git clone git@github.com:Learning-and-Intelligent-Systems/bddl.git`
+4. Fill out https://forms.gle/GXAacjpnotKkM2An7 -- ask Nishanth for the signed form if you don't have it already. After submitting the form, open each download link in a new tab; otherwise you will be navigated away and will have to fill out the form again. Download all three, although the third link can be ignored for now. The second and third downloads are very large (~40 GB) and will take a long time.
+5. `mkdir iGibson/igibson/data`
+6. Put each of the three downloads in that new data dir.
+7. `unzip behavior_data_bundle.zip` (about 27 GB unzipped)
+8. `pip install -e ./iGibson` (this will also take a long time)
+9. `pip install -e ./bddl`
+10. `python -m igibson.utils.assets_utils --download_assets`
+
+## Hack around an issue
+Issue: https://github.com/StanfordVL/iGibson/issues/122
+
+To see if this issue is still present, try this:
+`python iGibson/igibson/examples/behavior/behavior_env_metrics.py`
+
+If that results in the error `ERROR:root:Optimized renderer is not supported on Mac`, then you will need the following hack.
+
+In `iGibson/igibson/envs/env_base.py`, in the call to `MeshRendererSettings`, change `optimized=True` to `optimized=False`.
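+
+A minimal sketch of the edited call, assuming the usual import path (keep whatever other keyword arguments `env_base.py` already passes and only flip the `optimized` flag):
+
+```python
+from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
+
+# Only the optimized flag needs to change; the remaining settings stay as in env_base.py.
+settings = MeshRendererSettings(optimized=False)
+```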
+
+## Check if it worked
+This should now run with a GUI:
+`python iGibson/igibson/examples/behavior/behavior_env_metrics.py -m pbgui`
+or with a different GUI:
+`python iGibson/igibson/examples/behavior/behavior_env_metrics.py -m iggui`
diff --git a/pyproject.toml b/pyproject.toml
index 398e90355..09745ee4e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,46 +1,22 @@
[tool.black]
line-length = 120
target-version = ['py27', 'py36', 'py37']
-force-exclude = 'igibson/(data|external|render/(cpp|cryptopp|glad|glfw|glm|openvr|pybind11|sranipal))'
+extend-exclude = 'igibson/(data|external|render/(cpp|cryptopp|glad|glfw|glm|openvr|pybind11|sranipal))'
+# WIP.
[tool.isort]
profile = "black"
line_length = 120
py_version = 'all'
-filter_files = true
-extend_skip_glob = [
- 'igibson/data/*',
- 'igibson/external/*',
- 'igibson/render/cpp/*',
- 'igibson/render/cryptopp/*',
- 'igibson/render/glad/*',
- 'igibson/render/glfw/*',
- 'igibson/render/glm/*',
- 'igibson/render/openvr/*',
- 'igibson/render/pybind11/*',
- 'igibson/render/sranipal/*',
+extend_skip = [
+ 'igibson/data',
+ 'igibson/external',
+ 'igibson/render/cpp',
+ 'igibson/render/cryptopp',
+ 'igibson/render/glad',
+ 'igibson/render/glfw',
+ 'igibson/render/glm',
+ 'igibson/render/openvr',
+ 'igibson/render/pybind11',
+ 'igibson/render/sranipal',
]
-
-[tool.pyright]
-exclude = [
- 'igibson/data',
- 'igibson/docs',
- 'igibson/docker',
- 'igibson/render/cryptopp',
- 'igibson/render/cpp',
- 'igibson/render/openvr',
- 'igibson/render/glfw',
- 'igibson/render/glad',
- 'igibson/render/glm',
- 'igibson/render/pybind11',
- 'igibson/render/sranipal'
-]
-
-[tool.pytest.ini_options]
-testpaths = [
- "tests",
-]
-addopts = '--cov=igibson --cov-report=xml'
-
-[tool.coverage.run]
-omit = ["igibson/external/*"]
diff --git a/requirements-dev.txt b/requirements-dev.txt
deleted file mode 100644
index 83e80210c..000000000
--- a/requirements-dev.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-sphinx-markdown-tables~=0.0.15
-sphinx~=3.5.3
-recommonmark~=0.7.1
-sphinx-rtd-theme~=1.0.0
-pytest~=6.2.3
-torch>=1.7.0
-torchvision>=0.8.1
-flask~=1.1.2
-scikit-image~=0.16.2
-requests~=2.25.1
-pytest-cov>=3.0.0
-coverage[toml]>=6.1.1
-myst-parser>=0.15.2
diff --git a/setup.py b/setup.py
index 20cb55d66..8f1001665 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,4 @@
import codecs
-import os
import os.path
import platform
import re
@@ -88,9 +87,6 @@ def build_extension(self, ext):
else:
cmake_args += ["-DMAC_PLATFORM=FALSE"]
- if os.getenv("USE_VR"):
- cmake_args += ["-DUSE_VR=TRUE"]
-
cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
@@ -130,7 +126,7 @@ def run(self):
setup(
name="igibson",
- version="2.0.4",
+ version="2.0.1",
author="Stanford University",
long_description_content_type="text/markdown",
long_description=long_description,
@@ -154,14 +150,17 @@ def run(self):
"aenum",
"GPUtil",
"ipython",
+ "pytest",
"future",
"trimesh",
+ "sphinx_markdown_tables",
+ "sphinx>=1.8.0",
+ "recommonmark",
+ "sphinx_rtd_theme",
"h5py",
"gitpython",
"py360convert",
- "six",
- "pandas",
- "packaging",
+ "bddl",
],
ext_modules=[CMakeExtension("MeshRendererContext", sourcedir="igibson/render")],
cmdclass=dict(build_ext=CMakeBuild),
diff --git a/tests/test_determinism_against_same_version.py b/tests/test_determinism_against_same_version.py
deleted file mode 100644
index 3b239fe86..000000000
--- a/tests/test_determinism_against_same_version.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-import tempfile
-
-from igibson.examples.learning.demo_collection_example import collect_demo
-from igibson.examples.learning.demo_replaying_example import safe_replay_demo
-
-
-def test_determinism_with_new_demo():
- # First record a random demo.
- with tempfile.TemporaryDirectory() as directory:
- demo_file = os.path.join(directory, "demo.hdf5")
- print("Saving demo.")
- collect_demo("Benevolence_1_int", "cleaning_out_drawers", 0, 0, log_path=demo_file)
-
- # Then replay the random demo.
- print("Replaying demo.")
- replay_file = os.path.join(directory, "replay.hdf5")
- safe_replay_demo(demo_file, out_log_path=replay_file, mode="headless")