diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index de9ba3afbe..7d342d07cb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -48,6 +48,8 @@ jobs: fetch-depth: 0 - name: Fetch tags run: git fetch --tags --force + - name: Install RoboRIO Toolchain + run: ./gradlew installRoboRioToolchain - name: Install Java 17 uses: actions/setup-java@v4 with: @@ -158,14 +160,14 @@ jobs: - run: git fetch --tags --force - run: | chmod +x gradlew - ./gradlew photon-targeting:build photon-lib:build -Pbuildalldesktop -i - - run: ./gradlew photon-lib:publish photon-targeting:publish -Pbuildalldesktop + ./gradlew photon-targeting:build photon-lib:build -i + - run: ./gradlew photon-lib:publish photon-targeting:publish name: Publish env: ARTIFACTORY_API_KEY: ${{ secrets.ARTIFACTORY_API_KEY }} if: github.event_name == 'push' && github.repository_owner == 'photonvision' # Copy artifacts to build/outputs/maven - - run: ./gradlew photon-lib:publish photon-targeting:publish -PcopyOfflineArtifacts -Pbuildalldesktop + - run: ./gradlew photon-lib:publish photon-targeting:publish -PcopyOfflineArtifacts - uses: actions/upload-artifact@v4 with: name: maven-${{ matrix.artifact-name }} @@ -359,7 +361,7 @@ jobs: - os: ubuntu-latest artifact-name: LinuxArm64 image_suffix: RaspberryPi - image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-1/photonvision_raspi.img.xz + image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-4/photonvision_raspi.img.xz cpu: cortex-a7 image_additional_mb: 0 extraOpts: -Djdk.lang.Process.launchMechanism=vfork @@ -398,33 +400,45 @@ jobs: - os: ubuntu-latest artifact-name: LinuxArm64 image_suffix: RaspberryPi - image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-1/photonvision_raspi.img.xz + image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-4/photonvision_raspi.img.xz cpu: cortex-a7 image_additional_mb: 0 - os: ubuntu-latest artifact-name: LinuxArm64 image_suffix: limelight2 - image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-1/photonvision_limelight.img.xz + image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-4/photonvision_limelight.img.xz cpu: cortex-a7 image_additional_mb: 0 - os: ubuntu-latest artifact-name: LinuxArm64 image_suffix: limelight3 - image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-1/photonvision_limelight3.img.xz + image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-4/photonvision_limelight3.img.xz cpu: cortex-a7 image_additional_mb: 0 - os: ubuntu-latest artifact-name: LinuxArm64 image_suffix: orangepi5 - image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2024.0.10/photonvision_opi5.img.xz + image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-3/photonvision_opi5.img.xz cpu: cortex-a8 - image_additional_mb: 4096 + image_additional_mb: 1024 + - os: ubuntu-latest + artifact-name: LinuxArm64 + image_suffix: orangepi5b + image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-3/photonvision_opi5b.img.xz + cpu: cortex-a8 + image_additional_mb: 1024 - os: ubuntu-latest artifact-name: LinuxArm64 image_suffix: orangepi5plus - image_url: 
https://github.com/PhotonVision/photon-image-modifier/releases/download/v2024.0.10/photonvision_opi5plus.img.xz + image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-3/photonvision_opi5plus.img.xz cpu: cortex-a8 - image_additional_mb: 4096 + image_additional_mb: 1024 + - os: ubuntu-latest + artifact-name: LinuxArm64 + image_suffix: orangepi5pro + image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-3/photonvision_opi5pro.img.xz + cpu: cortex-a8 + image_additional_mb: 1024 runs-on: ${{ matrix.os }} name: "Build image - ${{ matrix.image_url }}" @@ -437,7 +451,7 @@ jobs: - uses: actions/download-artifact@v4 with: name: jar-${{ matrix.artifact-name }} - - uses: pguyot/arm-runner-action@v2 + - uses: pguyot/arm-runner-action@HEAD name: Generate image id: generate_image with: @@ -462,7 +476,7 @@ jobs: name: image-${{ matrix.image_suffix }} path: photonvision*.xz release: - needs: [build-package, build-image] + needs: [build-package, build-image, combine] runs-on: ubuntu-22.04 steps: # Download all fat JARs diff --git a/.github/workflows/lint-format.yml b/.github/workflows/lint-format.yml index fb97eb77af..09f86fc6c6 100644 --- a/.github/workflows/lint-format.yml +++ b/.github/workflows/lint-format.yml @@ -37,7 +37,7 @@ jobs: with: python-version: 3.11 - name: Install wpiformat - run: pip3 install wpiformat==2024.37 + run: pip3 install wpiformat==2024.41 - name: Run run: wpiformat - name: Check output diff --git a/.gitignore b/.gitignore index 65c9d83ebd..29438a5193 100644 --- a/.gitignore +++ b/.gitignore @@ -165,3 +165,6 @@ photon-server/src/main/resources/web/index.html photon-lib/src/generate/native/cpp/PhotonVersion.cpp venv + +.venv/* +.venv diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 773d340b3e..8b3ef1aca6 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -18,7 +18,7 @@ build: # If there are no changes (git diff exits with 0) we force the command to return with 183. # This is a special exit code on Read the Docs that will cancel the build immediately. - | - if [ "$READTHEDOCS_VERSION_TYPE" = "external" ] && git diff --quiet origin/master -- docs/ docs/.readthedocs.yaml; + if [ "$READTHEDOCS_VERSION_TYPE" = "external" ] && git diff --quiet origin/master -- docs/ .readthedocs.yaml; then exit 183; fi diff --git a/README.md b/README.md index 4fcf69c59e..475d386054 100644 --- a/README.md +++ b/README.md @@ -37,9 +37,11 @@ Note that these are case sensitive! * arm64 * x86-64 * x86 -- `-PtgtIp`: Specifies where `./gradlew deploy` should try to copy the fat JAR to +- `-PtgtIP`: Specifies where `./gradlew deploy` should try to copy the fat JAR to - `-Pprofile`: enables JVM profiling +If you're cross-compiling, you'll need the wpilib toolchain installed. This can be done via Gradle: for example `./gradlew installArm64Toolchain` or `./gradlew installRoboRioToolchain` + ## Out-of-Source Dependencies PhotonVision uses the following additonal out-of-source repositories for building code. 
diff --git a/build.gradle b/build.gradle index 9d43a514b4..18aac2cc63 100644 --- a/build.gradle +++ b/build.gradle @@ -1,12 +1,15 @@ import edu.wpi.first.toolchain.* plugins { + id "java" + id "cpp" id "com.diffplug.spotless" version "6.24.0" - id "edu.wpi.first.NativeUtils" version "2024.7.2" apply false + id "edu.wpi.first.NativeUtils" version "2024.6.1" apply false id "edu.wpi.first.wpilib.repositories.WPILibRepositoriesPlugin" version "2020.2" id "edu.wpi.first.GradleRIO" version "2024.3.2" id 'edu.wpi.first.WpilibTools' version '1.3.0' id 'com.google.protobuf' version '0.9.4' apply false + id 'edu.wpi.first.GradleJni' version '1.1.0' } allprojects { @@ -28,12 +31,12 @@ ext.allOutputsFolder = file("$project.buildDir/outputs") apply from: "versioningHelper.gradle" ext { - wpilibVersion = "2025.0.0-alpha-1" + wpilibVersion = "2024.3.2" wpimathVersion = wpilibVersion openCVversion = "4.8.0-2" joglVersion = "2.4.0" javalinVersion = "5.6.2" - libcameraDriverVersion = "dev-v2023.1.0-12-gfb1eafb" + libcameraDriverVersion = "dev-v2023.1.0-14-g787ab59" rknnVersion = "dev-v2024.0.1-4-g0db16ac" frcYear = "2024" mrcalVersion = "dev-v2024.0.0-24-gc1efcf0"; @@ -64,7 +67,7 @@ spotless { java { target fileTree('.') { include '**/*.java' - exclude '**/build/**', '**/build-*/**', "photon-core\\src\\main\\java\\org\\photonvision\\PhotonVersion.java", "photon-lib\\src\\main\\java\\org\\photonvision\\PhotonVersion.java" + exclude '**/build/**', '**/build-*/**', "photon-core\\src\\main\\java\\org\\photonvision\\PhotonVersion.java", "photon-lib\\src\\main\\java\\org\\photonvision\\PhotonVersion.java", "**/src/generated/**" } toggleOffOn() googleJavaFormat() diff --git a/devTools/calibrationUtils.py b/devTools/calibrationUtils.py index e12b15b250..676382c579 100644 --- a/devTools/calibrationUtils.py +++ b/devTools/calibrationUtils.py @@ -3,7 +3,6 @@ from dataclasses import dataclass import json import os -from typing import Union import cv2 import numpy as np import mrcal diff --git a/docs/.gitignore b/docs/.gitignore index f79cf99890..b7a9f77acb 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -3,8 +3,6 @@ build/* .vscode/* .idea/* source/_build -source/_build -photon-docs/build source/docs/_build venv/* diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml deleted file mode 100644 index 3b80b77b96..0000000000 --- a/docs/.readthedocs.yaml +++ /dev/null @@ -1,15 +0,0 @@ -version: 2 - -sphinx: - builder: html - configuration: source/conf.py - fail_on_warning: true - -build: - os: ubuntu-22.04 - tools: - python: "3.11" - -python: - install: - - requirements: requirements.txt diff --git a/docs/requirements.txt b/docs/requirements.txt index 582e43a6c3..84c1582463 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -40,3 +40,5 @@ sphinxext-remoteliteralinclude==0.4.0 stevedore==5.1.0 urllib3==2.1.0 yarg==0.1.9 +sphinx-autobuild==2024.4.16 +myst_parser==3.0.1 diff --git a/docs/source/404.md b/docs/source/404.md new file mode 100644 index 0000000000..0af59ce7ad --- /dev/null +++ b/docs/source/404.md @@ -0,0 +1,7 @@ +--- +orphan: true +--- + +# Requested Page Not Found + +This page you were looking for was not found. 
If you think this is a mistake, [file an issue on our GitHub.](https://github.com/PhotonVision/photonvision-docs/issues) diff --git a/docs/source/404.rst b/docs/source/404.rst deleted file mode 100644 index ab9b20f84e..0000000000 --- a/docs/source/404.rst +++ /dev/null @@ -1,6 +0,0 @@ -:orphan: - -Requested Page Not Found -======================== - -This page you were looking for was not found. If you think this is a mistake, `file an issue on our GitHub. `__ diff --git a/docs/source/conf.py b/docs/source/conf.py index cfc08effe9..09c6604cc1 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -35,6 +35,7 @@ "sphinxext.opengraph", "sphinxcontrib.ghcontributors", "sphinx_design", + "myst_parser", ] # Configure OpenGraph support @@ -71,6 +72,8 @@ # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] +source_suffix = [".rst", ".md"] + def setup(app): app.add_css_file("css/pv-icons.css") @@ -124,3 +127,6 @@ def setup(app): # Excluded links for linkcheck # These should be periodically checked by hand to ensure that they are still functional linkcheck_ignore = ["https://www.raspberrypi.com/software/"] + +# MyST configuration (https://myst-parser.readthedocs.io/en/latest/configuration.html) +myst_enable_extensions = ["colon_fence"] diff --git a/docs/source/docs/additional-resources/best-practices.md b/docs/source/docs/additional-resources/best-practices.md new file mode 100644 index 0000000000..7dd11ecd0b --- /dev/null +++ b/docs/source/docs/additional-resources/best-practices.md @@ -0,0 +1,29 @@ +# Best Practices For Competition + +## Before Competition + +- Ensure you have spares of the relevant electronics if you can afford it (switch, coprocessor, cameras, etc.). +- Download the latest release .jar onto your computer and update your Pi if necessary (only update if the release is labeled "critical" or similar, we do not recommend updating right before an event in case there are unforeseen bugs). +- Test out PhotonVision at your home setup. +- Ensure that you have set up SmartDashboard / Shuffleboard to view your camera streams during matches. +- Follow all the recommendations under the Networking section in installation (network switch and static IP). +- Use high quality ethernet cables that have been rigorously tested. +- Set up port forwarding using the guide in the Networking section in installation. + +## During the Competition + +- Make sure you take advantage of the field calibration time given at the start of the event: + - Bring your robot to the field at the allotted time. + - Turn on your robot and pull up the dashboard on your driver station. + - Point your robot at the AprilTags(s) and ensure you get a consistent tracking (you hold one AprilTag consistently, the ceiling lights aren't detected, etc.). + - If you have problems with your pipeline, go to the pipeline tuning section and retune the pipeline using the guide there. + - Move the robot close, far, angled, and around the field to ensure no extra AprilTags are found. + - Go to a practice match to ensure everything is working correctly. +- After field calibration, use the "Export Settings" button in the "Settings" page to create a backup. + - Do this for each coprocessor on your robot that runs PhotonVision, and name your exports with meaningful names. + - This will contain camera information/calibration, pipeline information, network settings, etc. 
+ - In the event of software/hardware failures (IE lost SD Card, broken device), you can then use the "Import Settings" button and select "All Settings" to restore your settings. + - This effectively works as a snapshot of your PhotonVision data that can be restored at any point. +- Before every match, check the ethernet connection going into your coprocessor and that it is seated fully. +- Ensure that exposure is as low as possible and that you don't have the dashboard up when you don't need it to reduce bandwidth. +- Stream at as low of a resolution as possible while still detecting AprilTags to stay within field bandwidth limits. diff --git a/docs/source/docs/additional-resources/best-practices.rst b/docs/source/docs/additional-resources/best-practices.rst deleted file mode 100644 index 075d083649..0000000000 --- a/docs/source/docs/additional-resources/best-practices.rst +++ /dev/null @@ -1,32 +0,0 @@ -Best Practices For Competition -============================== - -Before Competition ------------------- -* Ensure you have spares of the relevant electronics if you can afford it (switch, coprocessor, cameras, etc.). -* Download the latest release .jar onto your computer and update your Pi if necessary (only update if the release is labeled "critical" or similar, we do not recommend updating right before an event in case there are unforeseen bugs). -* Test out PhotonVision at your home setup. -* Ensure that you have set up SmartDashboard / Shuffleboard to view your camera streams during matches. -* Follow all the recommendations under the Networking section in installation (network switch and static IP). -* Use high quality ethernet cables that have been rigorously tested. -* Set up port forwarding using the guide in the Networking section in installation. - -During the Competition ----------------------- -* Make sure you take advantage of the field calibration time given at the start of the event: - * Bring your robot to the field at the allotted time. - * Turn on your robot and pull up the dashboard on your driver station. - * Point your robot at the target(s) and ensure you get a consistent tracking (you hold one target consistently, the ceiling lights aren't detected, etc.). - * If you have problems with your pipeline, go to the pipeline tuning section and retune the pipeline using the guide there. You want to make your exposure as low as possible with a tight hue value to ensure no extra targets are detected. - * Move the robot close, far, angled, and around the field to ensure no extra targets are found anywhere when looking for a target. - * Go to a practice match to ensure everything is working correctly. - -* After field calibration, use the "Export Settings" button in the "Settings" page to create a backup. - * Do this for each coprocessor on your robot that runs PhotonVision, and name your exports with meaningful names. - * This will contain camera information/calibration, pipeline information, network settings, etc. - * In the event of software/hardware failures (IE lost SD Card, broken device), you can then use the "Import Settings" button and select "All Settings" to restore your settings. - * This effectively works as a snapshot of your PhotonVision data that can be restored at any point. - -* Before every match, check the ethernet connection going into your coprocessor and that it is seated fully. -* Ensure that exposure is as low as possible and that you don't have the dashboard up when you don't need it to reduce bandwidth. 
-* Stream at as low of a resolution as possible while still detecting targets to stay within bandwidth limits. diff --git a/docs/source/docs/additional-resources/config.md b/docs/source/docs/additional-resources/config.md new file mode 100644 index 0000000000..76fdfee9cb --- /dev/null +++ b/docs/source/docs/additional-resources/config.md @@ -0,0 +1,50 @@ +# Filesystem Directory + +PhotonVision stores and loads settings in the {code}`photonvision_config` directory, in the same folder as the PhotonVision JAR is stored. On the Pi image as well as the Gloworm, this is in the {code}`/opt/photonvision` directory. The contents of this directory can be exported as a zip archive from the settings page of the interface, under "export settings". This export will contain everything detailed below. These settings can later be uploaded using "import settings", to restore configurations from previous backups. + +## Directory Structure + +The directory structure is outlined below. + +```{image} images/configDir.png +:alt: Config directory structure +:width: 600 +``` + +- calibImgs + - Images saved from the last run of the calibration routine +- cameras + - Contains a subfolder for each camera. This folder contains the following files: + - pipelines folder, which contains a {code}`json` file for each user-created pipeline. + - config.json, which contains all camera-specific configuration. This includes FOV, pitch, current pipeline index, and calibration data + - drivermode.json, which contains settings for the driver mode pipeline +- imgSaves + - Contains images saved with the input/output save commands. +- logs + - Contains timestamped logs in the format {code}`photonvision-YYYY-MM-D_HH-MM-SS.log`. Note that on Pi or Gloworm these timestamps will likely be significantly behind the real time. +- hardwareSettings.json + - Contains hardware settings. Currently this includes only the LED brightness. +- networkSettings.json + - Contains network settings, including team number (or remote network tables address), static/dynamic settings, and hostname. + +## Importing and Exporting Settings + +The entire settings directory can be exported as a ZIP archive from the settings page. + +```{raw} html + +``` + +A variety of files can be imported back into PhotonVision: + +- ZIP Archive ({code}`.zip`) + - Useful for restoring a full configuration from a different PhotonVision instance. +- Single Config File + - Currently-supported Files + - {code}`hardwareConfig.json` + - {code}`hardwareSettings.json` + - {code}`networkSettings.json` + - Useful for simple hardware or network configuration tasks without overwriting all settings. diff --git a/docs/source/docs/additional-resources/config.rst b/docs/source/docs/additional-resources/config.rst deleted file mode 100644 index 480b9a04bf..0000000000 --- a/docs/source/docs/additional-resources/config.rst +++ /dev/null @@ -1,54 +0,0 @@ -Filesystem Directory -==================== - -PhotonVision stores and loads settings in the :code:`photonvision_config` directory, in the same folder as the PhotonVision JAR is stored. On the Pi image as well as the Gloworm, this is in the :code:`/opt/photonvision` directory. The contents of this directory can be exported as a zip archive from the settings page of the interface, under "export settings". This export will contain everything detailed below. These settings can later be uploaded using "import settings", to restore configurations from previous backups. 
- - Directory Structure ------------------- - -The directory structure is outlined below. - -.. image:: images/configDir.png - :width: 600 - :alt: Config directory structure - -* calibImgs - - Images saved from the last run of the calibration routine -* cameras - - Contains a subfolder for each camera. This folder contains the following files: - + pipelines folder, which contains a :code:`json` file for each user-created pipeline. - + config.json, which contains all camera-specific configuration. This includes FOV, pitch, current pipeline index, and calibration data - + drivermode.json, which contains settings for the driver mode pipeline -* imgSaves - - Contains images saved with the input/output save commands. -* logs - - Contains timestamped logs in the format :code:`photonvision-YYYY-MM-D_HH-MM-SS.log`. Note that on Pi or Gloworm these timestamps will likely be significantly behind the real time. -* hardwareSettings.json - - Contains hardware settings. Currently this includes only the LED brightness. -* networkSettings.json - - Contains network settings, including team number (or remote network tables address), static/dynamic settings, and hostname. - -Importing and Exporting Settings -------------------------------- - -The entire settings directory can be exported as a ZIP archive from the settings page. - - -.. raw:: html - - - -A variety of files can be imported back into PhotonVision: - -- ZIP Archive (:code:`.zip`) - - Useful for restoring a full configuration from a different PhotonVision instance. -- Single Config File - - Currently-supported Files - - :code:`hardwareConfig.json` - - :code:`hardwareSettings.json` - - :code:`networkSettings.json` - - Useful for simple hardware or network configuration tasks without overwriting all settings. diff --git a/docs/source/docs/additional-resources/nt-api.md b/docs/source/docs/additional-resources/nt-api.md new file mode 100644 index 0000000000..e89c26b356 --- /dev/null +++ b/docs/source/docs/additional-resources/nt-api.md @@ -0,0 +1,70 @@ +# NetworkTables API + +## About + +:::{warning} +PhotonVision interfaces with PhotonLib, our vendor dependency, using NetworkTables. If you are running PhotonVision on a robot (ie. with a RoboRIO), you should **turn the NetworkTables server switch (in the settings tab) off** in order to get PhotonLib to work. Also ensure that you set your team number. The NetworkTables server should only be enabled if you know what you're doing! +::: + +## API + +:::{warning} +NetworkTables is not a supported setup/viable option when using PhotonVision as we only send one target at a time (this is problematic when using AprilTags, which will return data from multiple tags at once). We recommend using PhotonLib. +::: + +The tables below contain the name of the key for each entry that PhotonVision sends over the network and a short description of the key. The entries should be extracted from a subtable with your camera's nickname (visible in the PhotonVision UI) under the main `photonvision` table. + +### Getting Target Information + +| Key | Type | Description | | --------------- | ---------- | ------------------------------------------------------------------------ | | `rawBytes` | `byte[]` | A byte-packed string that contains target info from the same timestamp. | | `latencyMillis` | `double` | The latency of the pipeline in milliseconds. | | `hasTarget` | `boolean` | Whether the pipeline is detecting targets or not. | | `targetPitch` | `double` | The pitch of the target in degrees (positive up).
| +| `targetYaw` | `double` | The yaw of the target in degrees (positive right). | +| `targetArea` | `double` | The area (percent of bounding box in screen) as a percent (0-100). | +| `targetSkew` | `double` | The skew of the target in degrees (counter-clockwise positive). | +| `targetPose` | `double[]` | The pose of the target relative to the robot (x, y, z, qw, qx, qy, qz) | +| `targetPixelsX` | `double` | The target crosshair location horizontally, in pixels (origin top-right) | +| `targetPixelsY` | `double` | The target crosshair location vertically, in pixels (origin top-right) | + +### Changing Settings + +| Key | Type | Description | +| --------------- | --------- | --------------------------- | +| `pipelineIndex` | `int` | Changes the pipeline index. | +| `driverMode` | `boolean` | Toggles driver mode. | + +### Saving Images + +PhotonVision can save images to file on command. The image is saved when PhotonVision detects the command went from `false` to `true`. + +PhotonVision will automatically set these back to `false` after 500ms. + +Be careful saving images rapidly - it will slow vision processing performance and take up disk space very quickly. + +Images are returned as part of the .zip package from the "Export" operation in the Settings tab. + +| Key | Type | Description | +| ------------------ | --------- | ------------------------------------------------- | +| `inputSaveImgCmd` | `boolean` | Triggers saving the current input image to file. | +| `outputSaveImgCmd` | `boolean` | Triggers saving the current output image to file. | + +:::{warning} +If you manage to make calls to these commands faster than 500ms (between calls), additional photos will not be captured. +::: + +### Global Entries + +These entries are global, meaning that they should be called on the main `photonvision` table. + +| Key | Type | Description | +| --------- | ----- | -------------------------------------------------------- | +| `ledMode` | `int` | Sets the LED Mode (-1: default, 0: off, 1: on, 2: blink) | + +:::{warning} +Setting the LED mode to -1 (default) when `multiple` cameras are connected may result in unexpected behavior. {ref}`This is a known limitation of PhotonVision. ` + +Single camera operation should work without issue. +::: diff --git a/docs/source/docs/additional-resources/nt-api.rst b/docs/source/docs/additional-resources/nt-api.rst deleted file mode 100644 index c52ac70d83..0000000000 --- a/docs/source/docs/additional-resources/nt-api.rst +++ /dev/null @@ -1,86 +0,0 @@ -NetworkTables API -================= -About -^^^^^ - -.. warning:: - PhotonVision interfaces with PhotonLib, our vendor dependency, using NetworkTables. If you are running PhotonVision on a robot (ie. with a RoboRIO), you should **turn the NetworkTables server switch (in the settings tab) off** in order to get PhotonLib to work. Also ensure that you set your team number. The NetworkTables server should only be enabled if you know what you're doing! - -API -^^^ - -.. warning:: NetworkTables is not a supported setup/viable option when using PhotonVision as we only send one target at a time (this is problematic when using AprilTags, which will return data from multiple tags at once). We recommend using PhotonLib. - -The tables below contain the the name of the key for each entry that PhotonVision sends over the network and a short description of the key. The entries should be extracted from a subtable with your camera's nickname (visible in the PhotonVision UI) under the main ``photonvision`` table. 
- -Getting Target Information --------------------------- -+-------------------+--------------+--------------------------------------------------------------------------+ -| Key | Type | Description | -+===================+==============+==========================================================================+ -| ``rawBytes`` | ``byte[]`` | A byte-packed string that contains target info from the same timestamp. | -+-------------------+--------------+--------------------------------------------------------------------------+ -| ``latencyMillis`` | ``double`` | The latency of the pipeline in milliseconds. | -+-------------------+--------------+--------------------------------------------------------------------------+ -| ``hasTarget`` | ``boolean`` | Whether the pipeline is detecting targets or not. | -+-------------------+--------------+--------------------------------------------------------------------------+ -| ``targetPitch`` | ``double`` | The pitch of the target in degrees (positive up). | -+-------------------+--------------+--------------------------------------------------------------------------+ -| ``targetYaw`` | ``double`` | The yaw of the target in degrees (positive right). | -+-------------------+--------------+--------------------------------------------------------------------------+ -| ``targetArea`` | ``double`` | The area (percent of bounding box in screen) as a percent (0-100). | -+-------------------+--------------+--------------------------------------------------------------------------+ -| ``targetSkew`` | ``double`` | The skew of the target in degrees (counter-clockwise positive). | -+-------------------+--------------+--------------------------------------------------------------------------+ -| ``targetPose`` | ``double[]`` | The pose of the target relative to the robot (x, y, z, qw, qx, qy, qz) | -+-------------------+--------------+--------------------------------------------------------------------------+ -| ``targetPixelsX`` | ``double`` | The target crosshair location horizontally, in pixels (origin top-right) | -+-------------------+--------------+--------------------------------------------------------------------------+ -| ``targetPixelsY`` | ``double`` | The target crosshair location vertically, in pixels (origin top-right) | -+-------------------+--------------+--------------------------------------------------------------------------+ - -Changing Settings ------------------ -+-------------------+-------------+-----------------------------+ -| Key | Type | Description | -+===================+=============+=============================+ -| ``pipelineIndex`` | ``int`` | Changes the pipeline index. | -+-------------------+-------------+-----------------------------+ -| ``driverMode`` | ``boolean`` | Toggles driver mode. | -+-------------------+-------------+-----------------------------+ - - -Saving Images ------------------ -PhotonVision can save images to file on command. The image is saved when PhotonVision detects the command went from ``false`` to ``true``. - -PhotonVision will automatically set these back to ``false`` after 500ms. - -Be careful saving images rapidly - it will slow vision processing performance and take up disk space very quickly. - -Images are returned as part of the .zip package from the "Export" operation in the Settings tab. 
- -+----------------------+-------------+----------------------------------------------------+ -| Key | Type | Description | -+======================+=============+====================================================+ -| ``inputSaveImgCmd`` | ``boolean`` | Triggers saving the current input image to file. | -+----------------------+-------------+----------------------------------------------------+ -| ``outputSaveImgCmd`` | ``boolean`` | Triggers saving the current output image to file. | -+----------------------+-------------+----------------------------------------------------+ - -.. warning:: If you manage to make calls to these commands faster than 500ms (between calls), additional photos will not be captured. - -Global Entries --------------- -These entries are global, meaning that they should be called on the main ``photonvision`` table. - -+-------------+---------+----------------------------------------------------------+ -| Key | Type | Description | -+=============+=========+==========================================================+ -| ``ledMode`` | ``int`` | Sets the LED Mode (-1: default, 0: off, 1: on, 2: blink) | -+-------------+---------+----------------------------------------------------------+ - -.. warning:: - Setting the LED mode to -1 (default) when `multiple` cameras are connected may result in unexpected behavior. :ref:`This is a known limitation of PhotonVision. ` - - Single camera operation should work without issue. diff --git a/docs/source/docs/apriltag-pipelines/2D-tracking-tuning.rst b/docs/source/docs/apriltag-pipelines/2D-tracking-tuning.md similarity index 85% rename from docs/source/docs/apriltag-pipelines/2D-tracking-tuning.rst rename to docs/source/docs/apriltag-pipelines/2D-tracking-tuning.md index b8ee6441f9..84e7606119 100644 --- a/docs/source/docs/apriltag-pipelines/2D-tracking-tuning.rst +++ b/docs/source/docs/apriltag-pipelines/2D-tracking-tuning.md @@ -1,66 +1,54 @@ -2D AprilTag Tuning / Tracking -============================= +# 2D AprilTag Tuning / Tracking -Tracking Apriltags ------------------- +## Tracking Apriltags Before you get started tracking AprilTags, ensure that you have followed the previous sections on installation, wiring and networking. Next, open the Web UI, go to the top right card, and switch to the "AprilTag" or "Aruco" type. You should see a screen similar to the one below. -.. image:: images/apriltag.png - :align: center +```{image} images/apriltag.png +:align: center +``` -| +You are now able to detect and track AprilTags in 2D (yaw, pitch, roll, etc.). In order to get 3D data from your AprilTags, please see {ref}`here. ` -You are now able to detect and track AprilTags in 2D (yaw, pitch, roll, etc.). In order to get 3D data from your AprilTags, please see :ref:`here. ` - -Tuning AprilTags ----------------- +## Tuning AprilTags AprilTag pipelines come with reasonable defaults to get you up and running with tracking. However, in order to optimize your performance and accuracy, you must tune your AprilTag pipeline using the settings below. Note that the settings below are different between the AprilTag and Aruco detectors but the concepts are the same. -.. image:: images/apriltag-tune.png - :scale: 45 % - :align: center - -| +```{image} images/apriltag-tune.png +:align: center +:scale: 45 % +``` -Target Family -^^^^^^^^^^^^^ +### Target Family -Target families are defined by two numbers (before and after the h). 
The first number is the number of bits the tag is able to encode (which means more tags are available in the respective family) and the second is the hamming distance. Hamming distance describes the ability for error correction while identifying tag ids. A high hamming distance generally means that it will be easier for a tag to be identified even if there are errors. However, as hamming distance increases, the number of available tags decreases. The 2024 FRC game will be using 36h11 tags, which can be found `here `_. +Target families are defined by two numbers (before and after the h). The first number is the number of bits the tag is able to encode (which means more tags are available in the respective family) and the second is the hamming distance. Hamming distance describes the ability for error correction while identifying tag ids. A high hamming distance generally means that it will be easier for a tag to be identified even if there are errors. However, as hamming distance increases, the number of available tags decreases. The 2024 FRC game will be using 36h11 tags, which can be found [here](https://github.com/AprilRobotics/apriltag-imgs/tree/master/tag36h11). -Decimate -^^^^^^^^ +### Decimate Decimation (also known as down-sampling) is the process of reducing the sampling frequency of a signal (in our case, the image). Increasing decimate will lead to an increased detection rate while decreasing detection distance. We recommend keeping this at the default value. -Blur -^^^^ -This controls the sigma of Gaussian blur for tag detection. In clearer terms, increasing blur will make the image blurrier, decreasing it will make it closer to the original image. We strongly recommend that you keep blur to a minimum (0) due to it's high performance intensity unless you have an extremely noisy image. +### Blur +This controls the sigma of Gaussian blur for tag detection. In clearer terms, increasing blur will make the image blurrier, decreasing it will make it closer to the original image. We strongly recommend that you keep blur to a minimum (0) due to its high performance cost unless you have an extremely noisy image. -Threads -^^^^^^^ +### Threads Threads refers to the threads within your coprocessor's CPU. The theoretical maximum is device dependent, but we recommend that users to stick to one less than the amount of CPU threads that your coprocessor has. Increasing threads will increase performance at the cost of increased CPU load, temperature increase, etc. It may take some experimentation to find the most optimal value for your system. -Refine Edges -^^^^^^^^^^^^ +### Refine Edges The edges of the each polygon are adjusted to "snap to" high color differences surrounding it. It is recommended to use this in tandem with decimate as it can increase the quality of the initial estimate. -Pose Iterations -^^^^^^^^^^^^^^^ +### Pose Iterations Pose iterations represents the amount of iterations done in order for the AprilTag algorithm to converge on its pose solution(s). A smaller number between 0-100 is recommended. A smaller amount of iterations cause a more noisy set of poses when looking at the tag straight on, while higher values much more consistently stick to a (potentially wrong) pair of poses. WPILib contains many useful filter classes in order to account for a noisy tag reading. -Max Error Bits -^^^^^^^^^^^^^^ +### Max Error Bits Max error bits, also known as hamming distance, is the number of positions at which corresponding pieces of data / tag are different.
Put more generally, this is the number of bits (think of these as squares in the tag) that need to be changed / corrected in the tag to correctly detect it. A higher value means that more tags will be detected while a lower value cuts out tags that could be "questionable" in terms of detection. We recommend a value of 0 for the 16h5 and 7+ for the 36h11 family. -Decision Margin Cutoff -^^^^^^^^^^^^^^^^^^^^^^ +### Decision Margin Cutoff + The decision margin cutoff is how much “margin” the detector has left before it rejects a tag; increasing this rejects poorer tags. We recommend you keep this value around a 30. diff --git a/docs/source/docs/apriltag-pipelines/3D-tracking.rst b/docs/source/docs/apriltag-pipelines/3D-tracking.md similarity index 86% rename from docs/source/docs/apriltag-pipelines/3D-tracking.rst rename to docs/source/docs/apriltag-pipelines/3D-tracking.md index 4e06dd0005..416d12ab6e 100644 --- a/docs/source/docs/apriltag-pipelines/3D-tracking.rst +++ b/docs/source/docs/apriltag-pipelines/3D-tracking.md @@ -1,15 +1,13 @@ -3D Tracking -=========== +# 3D Tracking -3D AprilTag tracking will allow you to track the real-world position and rotation of a tag relative to the camera's image sensor. This is useful for robot pose estimation and other applications like autonomous scoring. In order to use 3D tracking, you must first :ref:`calibrate your camera `. Once you have, you need to enable 3D mode in the UI and you will now be able to get 3D pose information from the tag! For information on getting and using this information in your code, see :ref:`the programming reference. `. +3D AprilTag tracking will allow you to track the real-world position and rotation of a tag relative to the camera's image sensor. This is useful for robot pose estimation and other applications like autonomous scoring. In order to use 3D tracking, you must first {ref}`calibrate your camera `. Once you have, you need to enable 3D mode in the UI and you will now be able to get 3D pose information from the tag! For information on getting and using this information in your code, see {ref}`the programming reference. `. -Ambiguity ---------- +## Ambiguity Translating from 2D to 3D using data from the calibration and the four tag corners can lead to "pose ambiguity", where it appears that the AprilTag pose is flipping between two different poses. You can read more about this issue `here. ` Ambiguity is calculated as the ratio of reprojection errors between two pose solutions (if they exist), where reprojection error is the error corresponding to the image distance between where the apriltag's corners are detected vs where we expect to see them based on the tag's estimated camera relative pose. There are a few steps you can take to resolve/mitigate this issue: 1. Mount cameras at oblique angles so it is less likely that the tag will be seen straight on. -2. Use the :ref:`MultiTag system ` in order to combine the corners from multiple tags to get a more accurate and unambiguous pose. +2. Use the {ref}`MultiTag system ` in order to combine the corners from multiple tags to get a more accurate and unambiguous pose. 3. Reject all tag poses where the ambiguity ratio (available via PhotonLib) is greater than 0.2. 
diff --git a/docs/source/docs/apriltag-pipelines/about-apriltags.rst b/docs/source/docs/apriltag-pipelines/about-apriltags.md similarity index 61% rename from docs/source/docs/apriltag-pipelines/about-apriltags.rst rename to docs/source/docs/apriltag-pipelines/about-apriltags.md index 5f39c32d23..aaee12ff96 100644 --- a/docs/source/docs/apriltag-pipelines/about-apriltags.rst +++ b/docs/source/docs/apriltag-pipelines/about-apriltags.md @@ -1,12 +1,14 @@ -About Apriltags -=============== +# About Apriltags -.. image:: images/pv-apriltag.png - :align: center - :scale: 20 % +```{image} images/pv-apriltag.png +:align: center +:scale: 20 % +``` AprilTags are a common type of visual fiducial marker. Visual fiducial markers are artificial landmarks added to a scene to allow "localization" (finding your current position) via images. In simpler terms, tags mark known points of reference that you can use to find your current location. They are similar to QR codes in which they encode information, however, they hold only a single number. By placing AprilTags in known locations around the field and detecting them using PhotonVision, you can easily get full field localization / pose estimation. Alternatively, you can use AprilTags the same way you used retroreflective tape, simply using them to turn to goal without any pose estimation. -A more technical explanation can be found in the `WPILib documentation `_. +A more technical explanation can be found in the [WPILib documentation](https://docs.wpilib.org/en/latest/docs/software/vision-processing/apriltag/apriltag-intro.html). -.. note:: You can get FIRST's `official PDF of the targets used in 2024 here `_. +:::{note} +You can get FIRST's [official PDF of the targets used in 2024 here](https://firstfrc.blob.core.windows.net/frc2024/FieldAssets/Apriltag_Images_and_User_Guide.pdf). +::: diff --git a/docs/source/docs/apriltag-pipelines/coordinate-systems.rst b/docs/source/docs/apriltag-pipelines/coordinate-systems.md similarity index 56% rename from docs/source/docs/apriltag-pipelines/coordinate-systems.rst rename to docs/source/docs/apriltag-pipelines/coordinate-systems.md index 21dbb8f01a..c828321e9e 100644 --- a/docs/source/docs/apriltag-pipelines/coordinate-systems.rst +++ b/docs/source/docs/apriltag-pipelines/coordinate-systems.md @@ -1,49 +1,40 @@ -Coordinate Systems -================== +# Coordinate Systems -Field and Robot Coordinate Frame --------------------------------- +## Field and Robot Coordinate Frame -PhotonVision follows the WPILib conventions for the robot and field coordinate systems, as defined `here `_. +PhotonVision follows the WPILib conventions for the robot and field coordinate systems, as defined [here](https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/coordinate-systems.html). You define the camera to robot transform in the robot coordinate frame. -Camera Coordinate Frame ------------------------ +## Camera Coordinate Frame OpenCV by default uses x-left/y-down/z-out for camera transforms. PhotonVision applies a base rotation to this transformation to make robot to tag transforms more in line with the WPILib coordinate system. The x, y, and z axes are also shown in red, green, and blue in the 3D mini-map and targeting overlay in the UI. 
-* The origin is the focal point of the camera lens -* The x-axis points out of the camera -* The y-axis points to the left -* The z-axis points upwards +- The origin is the focal point of the camera lens +- The x-axis points out of the camera +- The y-axis points to the left +- The z-axis points upwards +```{image} images/camera-coord.png +:align: center +:scale: 45 % +``` -.. image:: images/camera-coord.png - :scale: 45 % - :align: center +```{image} images/multiple-tags.png +:align: center +:scale: 45 % +``` -| - -.. image:: images/multiple-tags.png - :scale: 45 % - :align: center - -| - -AprilTag Coordinate Frame -------------------------- +## AprilTag Coordinate Frame The AprilTag coordinate system is defined as follows, relative to the center of the AprilTag itself, and when viewing the tag as a robot would. Again, PhotonVision changes this coordinate system to be more in line with WPILib. This means that a robot facing a tag head-on would see a robot-to-tag transform with a translation only in x, and a rotation of 180 degrees about z. The tag coordinate system is also shown with x/y/z in red/green/blue in the UI target overlay and mini-map. -* The origin is the center of the tag -* The x-axis is normal to the plane the tag is printed on, pointing outward from the visible side of the tag. -* The y-axis points to the right -* The z-axis points upwards - - -.. image:: images/apriltag-coords.png - :scale: 45 % - :align: center +- The origin is the center of the tag +- The x-axis is normal to the plane the tag is printed on, pointing outward from the visible side of the tag. +- The y-axis points to the right +- The z-axis points upwards -| +```{image} images/apriltag-coords.png +:align: center +:scale: 45 % +``` diff --git a/docs/source/docs/apriltag-pipelines/detector-types.md b/docs/source/docs/apriltag-pipelines/detector-types.md new file mode 100644 index 0000000000..76d01b1008 --- /dev/null +++ b/docs/source/docs/apriltag-pipelines/detector-types.md @@ -0,0 +1,15 @@ +# AprilTag Pipeline Types + +PhotonVision offers two different AprilTag pipeline types based on different implementations of the underlying algorithm. Each one has its advantages / disadvantages, which are detailed below. + +:::{note} +Note that both of these pipeline types detect AprilTag markers and are just two different algorithms for doing so. +::: + +## AprilTag + +The AprilTag pipeline type is based on the [AprilTag](https://april.eecs.umich.edu/software/apriltag.html) library from the University of Michigan and we recommend it for most use cases. It is (to our understanding) most accurate pipeline type, but is also ~2x slower than AruCo. This was the pipeline type used by teams in the 2023 season and is well tested. + +## AruCo + +The AruCo pipeline is based on the [AruCo](https://docs.opencv.org/4.8.0/d9/d6a/group__aruco.html) library implementation from OpenCV. It is ~2x higher fps and ~2x lower latency than the AprilTag pipeline type, but is less accurate. We recommend this pipeline type for teams that need to run at a higher framerate or have a lower powered device. This pipeline type is new for the 2024 season and is not as well tested as AprilTag. 
diff --git a/docs/source/docs/apriltag-pipelines/detector-types.rst b/docs/source/docs/apriltag-pipelines/detector-types.rst deleted file mode 100644 index 3e596b6aa9..0000000000 --- a/docs/source/docs/apriltag-pipelines/detector-types.rst +++ /dev/null @@ -1,15 +0,0 @@ -AprilTag Pipeline Types -======================= - -PhotonVision offers two different AprilTag pipeline types based on different implementations of the underlying algorithm. Each one has its advantages / disadvantages, which are detailed below. - -.. note:: Note that both of these pipeline types detect AprilTag markers and are just two different algorithms for doing so. - -AprilTag --------- - -The AprilTag pipeline type is based on the `AprilTag `_ library from the University of Michigan and we recommend it for most use cases. It is (to our understanding) most accurate pipeline type, but is also ~2x slower than AruCo. This was the pipeline type used by teams in the 2023 season and is well tested. - -AruCo ------ -The AruCo pipeline is based on the `AruCo `_ library implementation from OpenCV. It is ~2x higher fps and ~2x lower latency than the AprilTag pipeline type, but is less accurate. We recommend this pipeline type for teams that need to run at a higher framerate or have a lower powered device. This pipeline type is new for the 2024 season and is not as well tested as AprilTag. diff --git a/docs/source/docs/apriltag-pipelines/index.md b/docs/source/docs/apriltag-pipelines/index.md new file mode 100644 index 0000000000..1a9044e964 --- /dev/null +++ b/docs/source/docs/apriltag-pipelines/index.md @@ -0,0 +1,10 @@ +# AprilTag Detection + +```{toctree} +about-apriltags +detector-types +2D-tracking-tuning +3D-tracking +multitag +coordinate-systems +``` diff --git a/docs/source/docs/apriltag-pipelines/index.rst b/docs/source/docs/apriltag-pipelines/index.rst deleted file mode 100644 index 920b4cdb1e..0000000000 --- a/docs/source/docs/apriltag-pipelines/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -AprilTag Detection -================== - -.. toctree:: - - about-apriltags - detector-types - 2D-tracking-tuning - 3D-tracking - multitag - coordinate-systems diff --git a/docs/source/docs/apriltag-pipelines/multitag.rst b/docs/source/docs/apriltag-pipelines/multitag.md similarity index 50% rename from docs/source/docs/apriltag-pipelines/multitag.rst rename to docs/source/docs/apriltag-pipelines/multitag.md index 1b03a66477..da5169fb04 100644 --- a/docs/source/docs/apriltag-pipelines/multitag.rst +++ b/docs/source/docs/apriltag-pipelines/multitag.md @@ -1,28 +1,30 @@ -MultiTag Localization -===================== +# MultiTag Localization -PhotonVision can combine AprilTag detections from multiple simultaneously observed AprilTags from a particular camera with information about where tags are expected to be located on the field to produce a better estimate of where the camera (and therefore robot) is located on the field. PhotonVision can calculate this multi-target result on your coprocessor, reducing CPU usage on your RoboRio. This result is sent over NetworkTables along with other detected targets as part of the ``PhotonPipelineResult`` provided by PhotonLib. +PhotonVision can combine AprilTag detections from multiple simultaneously observed AprilTags from a particular camera with information about where tags are expected to be located on the field to produce a better estimate of where the camera (and therefore robot) is located on the field. 
PhotonVision can calculate this multi-target result on your coprocessor, reducing CPU usage on your RoboRio. This result is sent over NetworkTables along with other detected targets as part of the `PhotonPipelineResult` provided by PhotonLib. -.. warning:: MultiTag requires an accurate field layout JSON to be uploaded! Differences between this layout and the tags' physical location will drive error in the estimated pose output. +:::{warning} +MultiTag requires an accurate field layout JSON to be uploaded! Differences between this layout and the tags' physical location will drive error in the estimated pose output. +::: -Enabling MultiTag -^^^^^^^^^^^^^^^^^ +## Enabling MultiTag Ensure that your camera is calibrated and 3D mode is enabled. Navigate to the Output tab and enable "Do Multi-Target Estimation". This enables MultiTag to use the uploaded field layout JSON to calculate your camera's pose in the field. This 3D transform will be shown as an additional table in the "targets" tab, along with the IDs of AprilTags used to compute this transform. -.. image:: images/multitag-ui.png - :width: 600 - :alt: Multitarget enabled and running in the PhotonVision UI - -.. note:: By default, enabling multi-target will disable calculating camera-to-target transforms for each observed AprilTag target to increase performance; the X/Y/angle numbers shown in the target table of the UI are instead calculated using the tag's expected location (per the field layout JSON) and the field-to-camera transform calculated using MultiTag. If you additionally want the individual camera-to-target transform calculated using SolvePNP for each target, enable "Always Do Single-Target Estimation". - -This multi-target pose estimate can be accessed using PhotonLib. We suggest using :ref:`the PhotonPoseEstimator class ` with the ``MULTI_TAG_PNP_ON_COPROCESSOR`` strategy to simplify code, but the transform can be directly accessed using ``getMultiTagResult``/``MultiTagResult()`` (Java/C++). +```{image} images/multitag-ui.png +:alt: Multitarget enabled and running in the PhotonVision UI +:width: 600 +``` +:::{note} +By default, enabling multi-target will disable calculating camera-to-target transforms for each observed AprilTag target to increase performance; the X/Y/angle numbers shown in the target table of the UI are instead calculated using the tag's expected location (per the field layout JSON) and the field-to-camera transform calculated using MultiTag. If you additionally want the individual camera-to-target transform calculated using SolvePNP for each target, enable "Always Do Single-Target Estimation". +::: +This multi-target pose estimate can be accessed using PhotonLib. We suggest using {ref}`the PhotonPoseEstimator class ` with the `MULTI_TAG_PNP_ON_COPROCESSOR` strategy to simplify code, but the transform can be directly accessed using `getMultiTagResult`/`MultiTagResult()` (Java/C++). +```{eval-rst} .. tab-set-code:: - .. code-block:: java + .. code-block:: Java var result = camera.getLatestResult(); if (result.getMultiTagResult().estimatedPose.isPresent) { @@ -37,17 +39,27 @@ This multi-target pose estimate can be accessed using PhotonLib. We suggest usin frc::Transform3d fieldToCamera = result.MultiTagResult().result.best; } -.. note:: The returned field to camera transform is a transform from the fixed field origin to the camera's coordinate system. This does not change based on alliance color, and by convention is on the BLUE ALLIANCE wall. + .. code-block:: Python + + # Coming Soon! 
+ +``` + +:::{note} +The returned field to camera transform is a transform from the fixed field origin to the camera's coordinate system. This does not change based on alliance color, and by convention is on the BLUE ALLIANCE wall. +::: -Updating the Field Layout -^^^^^^^^^^^^^^^^^^^^^^^^^ +## Updating the Field Layout -PhotonVision ships by default with the `2024 field layout JSON `_. The layout can be inspected by navigating to the settings tab and scrolling down to the "AprilTag Field Layout" card, as shown below. +PhotonVision ships by default with the [2024 field layout JSON](https://github.com/wpilibsuite/allwpilib/blob/main/apriltag/src/main/native/resources/edu/wpi/first/apriltag/2024-crescendo.json). The layout can be inspected by navigating to the settings tab and scrolling down to the "AprilTag Field Layout" card, as shown below. -.. image:: images/field-layout.png - :width: 600 - :alt: The currently saved field layout in the Photon UI +```{image} images/field-layout.png +:alt: The currently saved field layout in the Photon UI +:width: 600 +``` An updated field layout can be uploaded by navigating to the "Device Control" card of the Settings tab and clicking "Import Settings". In the pop-up dialog, select the "AprilTag Layout" type and choose an updated layout JSON (in the same format as the WPILib field layout JSON linked above) using the paperclip icon, and select "Import Settings". The AprilTag layout in the "AprilTag Field Layout" card below should be updated to reflect the new layout. -.. note:: Currently, there is no way to update this layout using PhotonLib, although this feature is under consideration. +:::{note} +Currently, there is no way to update this layout using PhotonLib, although this feature is under consideration. +::: diff --git a/docs/source/docs/calibration/calibration.md b/docs/source/docs/calibration/calibration.md new file mode 100644 index 0000000000..7f866d1dd5 --- /dev/null +++ b/docs/source/docs/calibration/calibration.md @@ -0,0 +1,147 @@ +# Calibrating Your Camera + +:::{important} +In order to detect AprilTags and use 3D mode, your camera must be calibrated at the desired resolution! Inaccurate calibration will lead to poor performance. +::: + +To calibrate a camera, images of a Charuco board (or chessboard) are taken. By comparing where the grid corners should be in object space (for example, a corner once every inch in an 8x6 grid) with where they appear in the camera image, we can find a least-squares estimate for intrinsic camera properties like focal lengths, center point, and distortion coefficients. For more on camera calibration, please review the [OpenCV documentation](https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html). + +:::{warning} +While any resolution can be calibrated, higher resolutions may be too performance-intensive for some coprocessors to handle. Therefore, we recommend experimenting to see what works best for your coprocessor. +::: + +:::{note} +The calibration data collected during calibration is specific to each physical camera, as well as each individual resolution. +::: + +## Calibration Tips + +Accurate camera calibration is required in order to get accurate pose measurements when using AprilTags and 3D mode. The tips below should help ensure success: + +01. Ensure the images you take have the target in different positions and angles, with as big of a difference between angles as possible. It is important to make sure the target overlay still lines up with the board while doing this.
Tilt no more than 45 degrees. +02. Use as big of a calibration target as your printer can print. +03. Ensure that your printed pattern has enough white border around it. +04. Ensure your camera stays in one position during the duration of the calibration. +05. Make sure you get all 12 images from varying distances and angles. +06. Take at least one image that covers the total image area, and generally ensure that you get even coverage of the lens with your image set. +07. Have good lighting; a diffusely lit target would be best (light specifically shining on the target without shadows). +08. Ensure the calibration target is completely flat and does not bend or fold in any way. It should be mounted/taped down to something flat and then used for calibration; do not just hold it up. +09. Avoid having targets that are parallel to the lens of the camera / straight on towards the camera as much as possible. You want angles and variations within your calibration images. + +Following the ideas above should help in getting an accurate calibration. + +## Calibrating using PhotonVision + +### 1. Navigate to the calibration section in the UI. + +The Cameras tab of the UI houses PhotonVision's camera calibration tooling. It assists users with calibrating their cameras, as well as allows them to view previously calibrated resolutions. We support both charuco and chessboard calibrations. + +### 2. Print out the calibration target. + +In the Camera Calibration tab, we'll print out the calibration target using the "Download" button. This should be printed on 8.5x11 printer paper. The downloaded page uses an 8x8 charuco board (or chessboard, depending on the selected calibration type). + +:::{warning} +Ensure that there is no scaling applied during printing (it should be at 100%) and that the PDF is printed as is on regular printer paper. Check the square size with calipers or an accurate measuring device after printing to ensure squares are sized properly, and enter the true size of the square in the UI text box. For optimal results, various resources are available online to calibrate your specific printer if needed. +::: + +### 3. Select calibration resolution and fill in appropriate target data. + +We'll next select a resolution to calibrate and populate our pattern spacing, marker size, and board size. The provided chessboard and charuco board are an 8x8 grid of 1 inch squares. The provided charuco board uses the 4x4 dictionary with a marker size of 0.75 inches (this board does not need the old OpenCV pattern selector selected). Printers are not perfect, and you need to measure your calibration target and enter the correct marker size (size of the aruco marker) and pattern spacing (aka size of the black square) using calipers or similar. Finally, once our entered data is correct, we'll click "start calibration." + +:::{warning} Old OpenCV Pattern selector. This should be used when the calibration target was generated by a version of OpenCV older than 4.6.0. This would include targets created by calib.io. If this selector is not set correctly the calibration will be completely invalid. For more info view [this GitHub issue](https://github.com/opencv/opencv_contrib/issues/3291). +::: + +### 4. Take calibration images from various angles. + +Now, we'll capture images of our board from various angles. It's important to check that the board overlay matches the board in your image.
The further the overdrawn points are from the true position of the chessboard corners, the less accurate the final calibration will be. We'll want to capture enough images to cover the whole camera's FOV (with a minimum of 12). Once we've got our images, we'll click "Finish calibration" and wait for the calibration process to complete. If all goes well, the mean error and FOVs will be shown in the table on the right. The FOV should be close to the camera's specified FOV (usually found in a datasheet), typically within ±10 degrees. The mean error should also be low, usually less than 1 pixel. + +```{raw} html + +``` + +## Accessing Calibration Images + +Details about a particular calibration can be viewed by clicking on that resolution in the calibrations tab. This tab allows you to download raw calibration data, upload a previous calibration, and inspect details about calculated camera intrinsics. + +```{image} images/cal-details.png +:alt: Captured calibration images +:width: 600 +``` + +:::{note} +More info on what these parameters mean can be found in [OpenCV's docs](https://docs.opencv.org/4.8.0/d4/d94/tutorial_camera_calibration.html) +::: + +- Fx/Fy: Estimated camera focal length, in mm +- Cx/Cy: Estimated camera optical center, in pixels. This should be at about the center of the image +- Distortion: OpenCV camera model distortion coefficients +- FOV: calculated using estimated focal length and image size. Useful for gut-checking calibration results +- Mean Err: Mean reprojection error, or distance between expected and observed chessboard corners for the full calibration dataset + +Below these outputs are the snapshots collected for calibration, along with a per-snapshot mean reprojection error. A snapshot with a larger reprojection error might indicate a bad snapshot, due to effects such as motion blur or misidentified chessboard corners. + +Calibration images can also be extracted from the downloaded JSON file using [this Python script](https://raw.githubusercontent.com/PhotonVision/photonvision/master/devTools/calibrationUtils.py). This script will unpack calibration images, and also generate a VNL file for use [with mrcal](https://mrcal.secretsauce.net/). + +``` +python3 /path/to/calibrationUtils.py path/to/photon_calibration.json /path/to/output/folder +``` + +```{image} images/unpacked-json.png +:alt: Captured calibration images +:width: 600 +``` + +## Investigating Calibration Data with mrcal + +[mrcal](https://mrcal.secretsauce.net/tour.html) is a command-line tool for camera calibration and visualization. PhotonVision has the option to use the mrcal backend during camera calibration to estimate intrinsics. mrcal can also be used post-calibration to inspect snapshots and provide feedback. These steps will closely follow the [mrcal tour](https://mrcal.secretsauce.net/tour-initial-calibration.html) -- I'm aggregating commands and notes here, but the mrcal documentation is much more thorough. + +Start by [Installing mrcal](https://mrcal.secretsauce.net/install.html). Note that while mrcal *calibration* using PhotonVision is supported on all platforms, investigation currently only works on Linux. Some users have also reported success using [WSL 2 on Windows](https://learn.microsoft.com/en-us/windows/wsl/tutorials/gui-apps). You may also need to install `feedgnuplot`. On Ubuntu systems, these commands should be run from a standalone terminal and *not* the one [built into vscode](https://github.com/ros2/ros2/issues/1406).
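On Debian-based systems, prepackaged builds are usually the easiest way to get started. As a rough sketch (the package names below are assumptions that vary by distro and release; defer to the mrcal install page for your system):

```
# Assumed package names on a recent Debian/Ubuntu release -- check the mrcal install docs first
$ sudo apt update
$ sudo apt install mrcal python3-mrcal feedgnuplot
```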
+ +Let's run `calibrationUtils.py` as described above, and then cd into the output folder. From here, you can follow the mrcal tour, just replacing the VNL filename and camera imager size as necessary. My camera calibration was at 1280x720, so I've set the XY limits to that below. + +``` +$ cd /path/to/output/folder +$ ls +matt@photonvision:~/Documents/Downloads/2024-01-02_lifecam_1280$ ls + corners.vnl img0.png img10.png img11.png img12.png img13.png img1.png + img2.png img3.png img4.png img5.png img6.png img7.png img8.png + img9.png cameramodel_0.cameramodel + +$ < corners.vnl \ + vnl-filter -p x,y | \ + feedgnuplot --domain --square --set 'xrange [0:1280] noextend' --set 'yrange [720:0] noextend' +``` + +```{image} images/mrcal-coverage.svg +:alt: A diagram showing the locations of all detected chessboard corners. +``` + +As you can see, we didn't do a fantastic job of covering our whole camera sensor -- there's a big gap across the whole right side, for example. We also only have 14 calibration images. We've also got our "cameramodel" file, which can be used by mrcal to display additional debug info. + +Let's inspect our reprojection error residuals. We expect their magnitudes and directions to be random -- if there's patterns in the colors shown, then our calibration probably doesn't fully explain our physical camera sensor. + +``` +$ mrcal-show-residuals --magnitudes --set 'cbrange [0:1.5]' ./camera-0.cameramodel +$ mrcal-show-residuals --directions --unset key ./camera-0.cameramodel +``` + +```{image} images/residual-magnitudes.svg +:alt: A diagram showing residual magnitudes +``` + +```{image} images/residual-directions.svg +:alt: A diagram showing residual directions +``` + +Clearly we don't have anywhere near enough data to draw any meaningful conclusions (yet). But for fun, let's dig into [camera uncertainty estimation](https://mrcal.secretsauce.net/tour-uncertainty.html). This diagram shows how expected projection error changes due to noise in calibration inputs. Lower projection error across a larger area of the sensor imply a better calibration that more fully covers the whole sensor. For my calibration data, you can tell the projection error isolines (lines of constant expected projection error) are skewed to the left, following my dataset (which was also skewed left). + +``` +$ mrcal-show-projection-uncertainty --unset key ./cameramodel_0.cameramodel +``` + +```{image} images/camera-uncertainty.svg +:alt: A diagram showing camera uncertainty +``` diff --git a/docs/source/docs/calibration/calibration.rst b/docs/source/docs/calibration/calibration.rst deleted file mode 100644 index 1fbbff12a6..0000000000 --- a/docs/source/docs/calibration/calibration.rst +++ /dev/null @@ -1,159 +0,0 @@ -Calibrating Your Camera -======================= - -.. important:: In order to detect AprilTags and use 3D mode, your camera must be calibrated at the desired resolution! Inaccurate calibration will lead to poor performance. - -To calibrate a camera, images of a chessboard (or grid of dots, or other target) are taken. by comparing where the grid corners (or dots) should be in object space (for example, a dot once every inch in an 8x6 grid) with where they appear in the camera image, we can find a least-squares estimate for intrinsic camera properties like focal lengths, center point, and distortion coefficients. For more on camera calibration, please review the `OpenCV documentation `_. - -.. 
warning:: While any resolution can be calibrated, resolutions lower than 960x720 are often too low to provide accurate results. Additionally, high resolutions may be too performance intensive for a coprocessor like a Raspberry Pi to handle (solutions to this are being looked into). Thus, we recommend 960x720 when using 3D mode. - -.. note::The calibration data collected during calibration is specific to each physical camera, as well as each individual resolution. - - -Calibration Tips ----------------- -Accurate camera calibration is required in order to get accurate pose measurements when using AprilTags and 3D mode. The tips below should help ensure success: - -1. Practice calibration using your laptop webcam and https://www.calibdb.net/. The target can be found on the website and should be printed out if possible. Once you print it out, try to line up your target with the overlay on the screen as best as possible. The point of this practice is to notice how you are prompted to place targets in certain positions on the screen that make sure you account for all regions of the sensor. The chessboard should (in general) not be facing parallel to the camera (straight on), nor should it be aligned with any of the camera axes (ie, rotated only about an axis going left/right, up/down, or out-of-the-camera). - -2. Ensure your the images you take have the target in different positions and angles, with as big of a difference between angles as possible. It is important to make sure the target overlay still lines up with the board while doing this. Tilt no more than 45 degrees. - -3. Use as big of a calibration target as your printer can print. - -4. Ensure that your printed pattern has enough white border around it. - -5. Ensure your camera stays in one position during the duration of the calibration. - -6. Make sure you get all 12 images from varying distances and angles. - -7. Take at least one image that covers the total image area, and generally ensure that you get even coverage of the lens with your image set. - -8. Have good lighting, having a diffusely lit target would be best (light specifically shining on the target without shadows). - -9. Ensure the calibration target is completely flat and does not bend or fold in any way. It should be mounted/taped down to something flat and then used for calibration, do not just hold it up. - -10. Avoid having targets that are parallel to the lens of the camera / straight on towards the camera as much as possible. You want angles and variations within your calibration images. - -Following the ideas above should help in getting an accurate calibration. - -Calibration Steps ------------------ - -Your camera can be calibrated using either the utility built into PhotonVision, which performs all the calculations on your coprocessor, or using a website such as `calibdb `_, which uses a USB webcam connected to your laptop. The integrated calibration utility is currently the only one that works with ribbon-cable CSI cameras or Limelights, but for USB webcams, calibdb is the preferred option. - -Calibrating using calibdb -------------------------- - -Calibdb uses a modified chessboard/aruco marker combination target called `ChArUco targets. `_ The website currently only supports Chrome browser. - -Download and print out (or display on a monitor) the calibration by clicking Show Pattern. Click "Calibrate" and align your camera with the ghost overlay of the calibration board. The website automatically calculates the next position and displays it for you. 
When complete, download the calibration (do **not** use the OpenCV format). Reconnect your camera to your coprocessor and navigate to the PhotonVision web interface's camera tab. Ensure the correct camera is selected, and click the "Import from CalibDB" button. Your calibration data will be automatically saved and applied! - -Calibrating using PhotonVision ------------------------------- - -1. Navigate to the calibration section in the UI. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The Cameras tab of the UI houses PhotonVision's camera calibration tooling. It assists users with calibrating their cameras, as well as allows them to view previously calibrated resolutions. We support both dot and chessboard calibrations. - -2. Print out the calibration target. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In the Camera Calibration tab, we'll print out the calibration target using the "Download" button. This should be printed on 8.5x11 printer paper. This page shows using an 8x8 chessboard. - -.. warning:: Ensure that there is no scaling applied during printing (it should be at 100%) and that the PDF is printed as is on regular printer paper. Check the square size with calipers or an accurate measuring device after printing to ensure squares are sized properly, and enter the true size of the square in the UI text box. For optimal results, various resources are available online to calibrate your specific printer if needed. - -3. Select calibration resolution and fill in appropriate target data. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -We'll next select a resolution to calibrate and populate our pattern spacing and board size. The provided chessboard is 8 squares in width and height, and each square should be about 1in across. Mine measured with a caliper was 0.96in, but this will vary per printer. Finally, once our entered data is correct, we'll click "start calibration." - -4. Take at calibration images from various angles. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Now, we'll capture images of our chessboard from various angles. The most important part of this step is to make sure that the chessboard overlay matches the chessboard in your image. The further the overdrawn points are from the true position of the chessboard corners, the less accurate the final calibration will be. We'll want to capture at least 12 images, trying to take one in each region of the camera sensor. Once we've got our images, we'll click "Finish calibration" and wait for the calibration process to complete. If all goes well, the mean error and standard deviation will be shown in the table on the right. - -.. raw:: html - - - -Accessing Calibration Images ----------------------------- - -Details about a particular calibration can be viewed by clicking on that resolution in the calibrations tab. This tab allows you to download raw calibration data, upload a previous calibration, and inspect details about calculated camera intrinsics. - -.. image:: images/cal-details.png - :width: 600 - :alt: Captured calibration images - -.. note:: More info on what these parameters mean can be found in `OpenCV's docs `_ - -- Fx/Fy: Estimated camera focal length, in mm -- Fx/Cy: Estimated camera optical center, in pixels. This should be at about the center of the image -- Distortion: OpenCV camera model distortion coefficients -- FOV: calculated using estimated focal length and image size. 
Useful for gut-checking calibration results -- Mean Err: Mean reprojection error, or distance between expected and observed chessboard cameras for the full calibration dataset - -Below these outputs are the snapshots collected for calibration, along with a per-snapshot mean reprojection error. A snapshot with a larger reprojection error might indicate a bad snapshot, due to effects such as motion blur or misidentified chessboard corners. - -Calibration images can also be extracted from the downloaded JSON file using `this Python script `_. This script will unpack calibration images, and also generate a VNL file for use `with mrcal `_. - -:: - - python3 /path/to/calibrationUtils.py path/to/photon_calibration.json /path/to/output/folder - -.. image:: images/unpacked-json.png - :width: 600 - :alt: Captured calibration images - - -Investigating Calibration Data with mrcal ------------------------------------------ - -`mrcal `_ is a command-line tool for camera calibration and visualization. PhotonVision has the option to use the mrcal backend during camera calibration to estimate intrinsics. mrcal can also be used post-calibration to inspect snapshots and provide feedback. These steps will closely follow the `mrcal tour `_ -- I'm aggregating commands and notes here, but the mrcal documentation is much more thorough. - -Start by `Installing mrcal `_. Note that while mrcal *calibration* using PhotonVision is supported on all platforms, but investigation right now only works on Linux. Some users have also reported luck using `WSL 2 on Windows `ap_ as well. You may also need to install ``feedgnuplot``. On Ubuntu systems, these commands should be run from a standalone terminal and *not* the one `built into vscode `_. - -Let's run ``calibrationUtils.py`` as described above, and then cd into the output folder. From here, you can follow the mrcal tour, just replacing the VNL filename and camera imager size as necessary. My camera calibration was at 1280x720, so I've set the XY limits to that below. - -:: - - $ cd /path/to/output/folder - $ ls - matt@photonvision:~/Documents/Downloads/2024-01-02_lifecam_1280$ ls - corners.vnl img0.png img10.png img11.png img12.png img13.png img1.png - img2.png img3.png img4.png img5.png img6.png img7.png img8.png - img9.png cameramodel_0.cameramodel - - $ < corners.vnl \ - vnl-filter -p x,y | \ - feedgnuplot --domain --square --set 'xrange [0:1280] noextend' --set 'yrange [720:0] noextend' - -.. image:: images/mrcal-coverage.svg - :alt: A diagram showing the locations of all detected chessboard corners. - -As you can see, we didn't do a fantastic job of covering our whole camera sensor -- there's a big gap across the whole right side, for example. We also only have 14 calibration images. We've also got our "cameramodel" file, which can be used by mrcal to display additional debug info. - -Let's inspect our reprojection error residuals. We expect their magnitudes and directions to be random -- if there's patterns in the colors shown, then our calibration probably doesn't fully explain our physical camera sensor. - -:: - - $ mrcal-show-residuals --magnitudes --set 'cbrange [0:1.5]' ./camera-0.cameramodel - $ mrcal-show-residuals --directions --unset key ./camera-0.cameramodel - -.. image:: images/residual-magnitudes.svg - :alt: A diagram showing residual magnitudes - -.. image:: images/residual-directions.svg - :alt: A diagram showing residual directions - -Clearly we don't have anywhere near enough data to draw any meaningful conclusions (yet). 
But for fun, let's dig into `camera uncertainty estimation `_. This diagram shows how expected projection error changes due to noise in calibration inputs. Lower projection error across a larger area of the sensor imply a better calibration that more fully covers the whole sensor. For my calibration data, you can tell the projection error isolines (lines of constant expected projection error) are skewed to the left, following my dataset (which was also skewed left). - -:: - - $ mrcal-show-projection-uncertainty --unset key ./cameramodel_0.cameramodel - -.. image:: images/camera-uncertainty.svg - :alt: A diagram showing camera uncertainty diff --git a/docs/source/docs/contributing/photonvision/assets/git-download.png b/docs/source/docs/contributing/assets/git-download.png similarity index 100% rename from docs/source/docs/contributing/photonvision/assets/git-download.png rename to docs/source/docs/contributing/assets/git-download.png diff --git a/docs/source/docs/contributing/building-docs.md b/docs/source/docs/contributing/building-docs.md new file mode 100644 index 0000000000..cde58472f7 --- /dev/null +++ b/docs/source/docs/contributing/building-docs.md @@ -0,0 +1,33 @@ +# Building the PhotonVision Documentation + +To build the PhotonVision documentation, you will require [Git](https://git-scm.com) and [Python 3.6 or greater](https://www.python.org). + +## Cloning the Documentation Repository + +Documentation lives within the main PhotonVision repository within the `docs` sub-folder. If you are planning on contributing, it is recommended to create a fork of the [PhotonVision repository](https://github.com/PhotonVision/photonvision). To clone this fork, run the following command in a terminal window: + +`git clone https://github.com/[your username]/photonvision` + +## Installing Python Dependencies + +You must install a set of Python dependencies in order to build the documentation. To do so, you can run the following command in the docs sub-folder: + +`~/photonvision/docs$ python -m pip install -r requirements.txt` + +## Building the Documentation + +In order to build the documentation, you can run the following command in the docs sub-folder. This will automatically rebuild the docs every time a file changes, and serve them locally at `localhost:8000` by default. + +`~/photonvision/docs$ sphinx-autobuild --open-browser source source/_build/html` + +## Opening the Documentation + +The built documentation is located at `docs/source/_build/html/index.html` relative to the root project directory, or can be accessed via the local web server if using sphinx-autobuild. + +## Docs Builds on Pull Requests + +Pre-merge builds of docs can be found at: `https://photonvision-docs--PRNUMBER.org.readthedocs.build/en/PRNUMBER/index.html`. These docs are republished on every commit to a pull request made to PhotonVision/photonvision. For example, PR 325 would have pre-merge documentation published to `https://photonvision-docs--325.org.readthedocs.build/en/325/index.html`. Additionally, the pull request will have a link directly to the pre-release build of the docs. This build only runs when there is a change to files in the docs sub-folder. + +## Style Guide + +PhotonVision follows the frc-docs style guide which can be found [here](https://docs.wpilib.org/en/stable/docs/contributing/style-guide.html). In order to run the linter locally (which builds on doc8 and checks for compliance with the style guide), follow the instructions [on GitHub](https://github.com/wpilibsuite/ohnoyoudidnt).
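Putting the earlier steps together, a typical first-time local docs workflow might look like the following sketch (it assumes your fork is cloned into `~/photonvision` and that `[your username]` is replaced with your GitHub username; adjust paths for your environment):

```
# Clone your fork, install the docs dependencies, then serve the docs locally with live reload
$ git clone https://github.com/[your username]/photonvision
$ cd photonvision/docs
$ python -m pip install -r requirements.txt
$ sphinx-autobuild --open-browser source source/_build/html
```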
diff --git a/docs/source/docs/contributing/building-photon.md b/docs/source/docs/contributing/building-photon.md new file mode 100644 index 0000000000..7eb7b8a99b --- /dev/null +++ b/docs/source/docs/contributing/building-photon.md @@ -0,0 +1,286 @@ +# Build Instructions + +This section contains the build instructions from the source code available at [our GitHub page](https://github.com/PhotonVision/photonvision). + +## Development Setup + +### Prerequisites + +**Java Development Kit:** + + This project requires Java Development Kit (JDK) 17 to be compiled. This is the same Java version that comes with WPILib for 2025+. If you don't have this JDK with WPILib, you can follow the instructions to install JDK 17 for your platform [here](https://bell-sw.com/pages/downloads/#jdk-17-lts). + +**Node JS:** + + The UI is written in Node JS. To compile the UI, Node 18.20.4 to Node 20.0.0 is required. To install Node JS follow the instructions for your platform [on the official Node JS website](https://nodejs.org/en/download/). However, modify this line + +```bash +nvm install 20 +``` + +so that it instead reads + +```javascript +nvm install 18.20.4 +``` + +## Compiling Instructions + +### Getting the Source Code + +Get the source code from git: + +```bash +git clone https://github.com/PhotonVision/photonvision +``` + +or alternatively download the source code from GitHub and extract the zip: + +```{image} assets/git-download.png +:alt: Download source code from git +:width: 600 +``` + +### Install Necessary Node JS Dependencies + +In the photon-client directory: + +```bash +npm install +``` + +### Build and Copy UI to Java Source + +In the root directory: + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Linux + + ``./gradlew buildAndCopyUI`` + + .. tab-item:: macOS + + ``./gradlew buildAndCopyUI`` + + .. tab-item:: Windows (cmd) + + ``gradlew buildAndCopyUI`` +``` + +### Build and Run PhotonVision + +To compile and run the project, issue the following command in the root directory: + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Linux + + ``./gradlew run`` + + .. tab-item:: macOS + + ``./gradlew run`` + + .. tab-item:: Windows (cmd) + + ``gradlew run`` +``` + +Running the following command under the root directory will build the jar under `photon-server/build/libs`: + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Linux + + ``./gradlew shadowJar`` + + .. tab-item:: macOS + + ``./gradlew shadowJar`` + + .. tab-item:: Windows (cmd) + + ``gradlew shadowJar`` +``` + +### Build and Run PhotonVision on a Raspberry Pi Coprocessor + +As a convenience, the build has a built-in `deploy` command which builds, deploys, and starts the current source code on a coprocessor. + +An architecture override is required to specify the deploy target's architecture. + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Linux + + ``./gradlew clean`` + + ``./gradlew deploy -PArchOverride=linuxarm64`` + + .. tab-item:: macOS + + ``./gradlew clean`` + + ``./gradlew deploy -PArchOverride=linuxarm64`` + + .. tab-item:: Windows (cmd) + + ``gradlew clean`` + + ``gradlew deploy -PArchOverride=linuxarm64`` +``` + +The `deploy` command is tested against Raspberry Pi coprocessors. Other similar coprocessors may work too. + +### Using PhotonLib Builds + +The build process includes the following task: + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Linux + + ``./gradlew generateVendorJson`` + + .. tab-item:: macOS + + ``./gradlew generateVendorJson`` + + .. 
tab-item:: Windows (cmd) + + ``gradlew generateVendorJson`` +``` + +This generates a vendordep JSON of your local build at `photon-lib/build/generated/vendordeps/photonlib.json`. + +The photonlib source can be published to your local maven repository after building: + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Linux + + ``./gradlew publishToMavenLocal`` + + .. tab-item:: macOS + + ``./gradlew publishToMavenLocal`` + + .. tab-item:: Windows (cmd) + + ``gradlew publishToMavenLocal`` +``` + +After adding the generated vendordep to your project, add the following to your project's `build.gradle` under the `plugins {}` block. + +```Java +repositories { + mavenLocal() +} +``` + +### Debugging PhotonVision Running Locally + +One way is by running the program using gradle with the {code}`--debug-jvm` flag. Run the program with {code}`./gradlew run --debug-jvm`, and attach to it with VSCode by adding the following to {code}`launch.json`. Note args can be passed with {code}`--args="foobar"`. + +``` +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "java", + "name": "Attach to Remote Program", + "request": "attach", + "hostName": "localhost", + "port": "5005", + "projectName": "photon-core", + } + ] +} +``` + +PhotonVision can also be run using the gradle tasks plugin with {code}`"args": "--debug-jvm"` added to launch.json. + +### Debugging PhotonVision Running on a CoProcessor + +Set up a VSCode configuration in {code}`launch.json` + +``` +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "java", + "name": "Attach to CoProcessor", + "request": "attach", + "hostName": "photonvision.local", + "port": "5801", + "projectName": "photon-core" + }, + ] +} +``` + +Stop any existing instance of PhotonVision. + +Launch the program with the following additional argument to the JVM: {code}`java -jar -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5801 photonvision.jar` + +Once the program says it is listening on port 5801, launch the debug configuration in VSCode. + +The program will wait for the VSCode debugger to attach before proceeding. + +### Running examples + +You can run one of the many built in examples straight from the command line, too! They contain a fully featured robot project, and some include simulation support. The projects can be found inside the photonlib-*-examples subdirectories for each language. + +#### Running C++/Java + +PhotonLib must first be published to your local maven repository, then the copy PhotonLib task will copy the generated vendordep json file into each example. After that, the simulateJava/simulateNative task can be used like a normal robot project. Robot simulation with attached debugger is technically possible by using simulateExternalJava and modifying the launch script it exports, though not yet supported. 
+ +``` +~/photonvision$ ./gradlew publishToMavenLocal + +~/photonvision$ cd photonlib-java-examples +~/photonvision/photonlib-java-examples$ ./gradlew copyPhotonlib +~/photonvision/photonlib-java-examples$ ./gradlew :simulateJava + +~/photonvision$ cd photonlib-cpp-examples +~/photonvision/photonlib-cpp-examples$ ./gradlew copyPhotonlib +~/photonvision/photonlib-cpp-examples$ ./gradlew :simulateNative +``` + +#### Running Python + +PhotonLibPy must first be built into a wheel. + +``` +> cd photon-lib/py +> buildAndTest.bat +``` + +Then, you must enable using the development wheels. robotpy will use pip behind the scenes, and this bat file tells pip about your development artifacts. + +Note: This is best done in a virtual environment. + +``` +> enableUsingDevBuilds.bat +``` + +Then, run the examples: + +``` +> cd photonlib-python-examples +> run.bat +``` diff --git a/docs/source/docs/contributing/developer-docs/index.md b/docs/source/docs/contributing/developer-docs/index.md new file mode 100644 index 0000000000..6f6aa85ff8 --- /dev/null +++ b/docs/source/docs/contributing/developer-docs/index.md @@ -0,0 +1,5 @@ +# PhotonVision Developer Documentation + +```{toctree} +photonlib-backups +``` diff --git a/docs/source/docs/contributing/developer-docs/photonlib-backups.md b/docs/source/docs/contributing/developer-docs/photonlib-backups.md new file mode 100644 index 0000000000..87e01703c9 --- /dev/null +++ b/docs/source/docs/contributing/developer-docs/photonlib-backups.md @@ -0,0 +1,16 @@ +# Photonlib Developer Docs + +Our maven server is located at https://maven.photonvision.org/#/. This server runs [Reposilite](https://hub.docker.com/r/dzikoysk/reposilite) in Docker, and uses Caddy for serving requests. + + +## Backing up using Rsync + +The Clarkson Open Source Institute at Clarkson University provides a mirror of our artifacts available [online](https://mirror.clarkson.edu/photonvision). Learn more about them at [their homepage](https://mirror.clarkson.edu/home). + +Artifacts from our Maven server can also be backed up locally to a folder called `photonlib-backup` using the following command, which excludes "snapshots" for space reasons: + +``` +rsync -avzrHy --no-perms --no-group --no-owner --ignore-errors --exclude ".~tmp~" --exclude "snapshots/org/photonvision/photontargeting*" \ +--exclude "snapshots/org/photonvision/photonlib*" maven.photonvision.org::reposilite-data \ +/path/to/photonlib-backup +``` diff --git a/docs/source/docs/contributing/index.md b/docs/source/docs/contributing/index.md new file mode 100644 index 0000000000..d9f1137b3e --- /dev/null +++ b/docs/source/docs/contributing/index.md @@ -0,0 +1,7 @@ +# Contributing to PhotonVision Projects + +```{toctree} +building-photon +building-docs +developer-docs/index +``` diff --git a/docs/source/docs/contributing/index.rst b/docs/source/docs/contributing/index.rst deleted file mode 100644 index 14b1dceb56..0000000000 --- a/docs/source/docs/contributing/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Contributing to PhotonVision Projects -===================================== - -.. 
toctree:: - - photonvision/index - photonvision-docs/index diff --git a/docs/source/docs/contributing/photonvision-docs/building-docs.rst b/docs/source/docs/contributing/photonvision-docs/building-docs.rst deleted file mode 100644 index 93f63d6cd0..0000000000 --- a/docs/source/docs/contributing/photonvision-docs/building-docs.rst +++ /dev/null @@ -1,32 +0,0 @@ -Building the PhotonVision Documentation -======================================= -To build the PhotonVision documentation, you will require `Git `_ and `Python 3.6 or greater `_. - -Cloning the Documentation Repository ------------------------------------- -If you are planning on contributing, it is recommended to create a fork of the `main docs repository `_. To clone this fork, run the following command in a terminal window: - -``git clone https://github.com/[your username]/photonvision-docs`` - -Installing Python Dependencies ------------------------------- -You must install a set of Python dependencies in order to build the documentation. To do so, you can run the following command in the root project directory: - -``python -m pip install -r requirements.txt`` - -Building the Documentation --------------------------- -In order to build the documentation, you can run the following command in the root project directory: - -``make html`` - -.. note:: You may have to run ``./make html`` on Windows. - -Opening the Documentation -------------------------- -The built documentation is located at ``build/html/index.html``. - -Docs Builds on Pull Requests ----------------------------- - -Pre-merge builds of docs can be found at: ``https://photonvision-docs--PRNUMBER.org.readthedocs.build/en/PRNUMBER/index.html``. These docs are republished on every commit to a pull request made to PhotonVision/photonvision-docs. For example, PR 325 would have pre-merge documentation published to ``https://photonvision-docs--325.org.readthedocs.build/en/325/index.html`` diff --git a/docs/source/docs/contributing/photonvision-docs/index.rst b/docs/source/docs/contributing/photonvision-docs/index.rst deleted file mode 100644 index d869754ac1..0000000000 --- a/docs/source/docs/contributing/photonvision-docs/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Contributing to PhotonVision Documentation -========================================== - -.. toctree:: - - building-docs - style-guide - top-contributors diff --git a/docs/source/docs/contributing/photonvision-docs/style-guide.rst b/docs/source/docs/contributing/photonvision-docs/style-guide.rst deleted file mode 100644 index 6b130c76e8..0000000000 --- a/docs/source/docs/contributing/photonvision-docs/style-guide.rst +++ /dev/null @@ -1,3 +0,0 @@ -Style Guide -=========== -PhotonVision follows the frc-docs style guide which can be found `here `_. In order to run the linter locally (which builds on doc8 and checks for compliance with the style guide), follow the instructions `on GitHub `_. diff --git a/docs/source/docs/contributing/photonvision-docs/top-contributors.rst b/docs/source/docs/contributing/photonvision-docs/top-contributors.rst deleted file mode 100644 index bc1fe933ea..0000000000 --- a/docs/source/docs/contributing/photonvision-docs/top-contributors.rst +++ /dev/null @@ -1,5 +0,0 @@ -Top Contributors -================ - -.. 
ghcontributors:: PhotonVision/photonvision-docs - :limit: 10 diff --git a/docs/source/docs/contributing/photonvision/build-instructions.rst b/docs/source/docs/contributing/photonvision/build-instructions.rst deleted file mode 100644 index 85d8d63672..0000000000 --- a/docs/source/docs/contributing/photonvision/build-instructions.rst +++ /dev/null @@ -1,267 +0,0 @@ -Build Instructions -================== - -This section contains the build instructions from the source code available at `our GitHub page `_. - -Development Setup ------------------ - -Prerequisites -~~~~~~~~~~~~~ - -| **Java Development Kit:** This project requires Java Development Kit (JDK) 17 to be compiled. This is the same Java version that comes with WPILib for 2025+. If you don't have this JDK with WPILib, you can follow the instructions to install JDK 17 for your platform `here `_. -| **Node JS:** The UI is written in Node JS. To compile the UI, Node 14.18.0 to Node 16.0.0 is required. To install Node JS follow the instructions for your platform `on the official Node JS website `_. However, modify this line - -.. code-block:: bash - - nvm install 20 - -so that it instead reads - -.. code-block:: javascript - - nvm install 14.18.0 - -Compiling Instructions ----------------------- - -Getting the Source Code -~~~~~~~~~~~~~~~~~~~~~~~ -Get the source code from git: - -.. code-block:: bash - - git clone https://github.com/PhotonVision/photonvision - -or alternatively download the source code from github and extract the zip: - -.. image:: assets/git-download.png - :width: 600 - :alt: Download source code from git - -Install Necessary Node JS Dependencies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the photon-client directory: - -.. code-block:: bash - - npm install - -Build and Copy UI to Java Source -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the root directory: - - -.. tab-set:: - - .. tab-item:: Linux - - ``./gradlew buildAndCopyUI`` - - .. tab-item:: macOS - - ``./gradlew buildAndCopyUI`` - - .. tab-item:: Windows (cmd) - - ``gradlew buildAndCopyUI`` - -Build and Run PhotonVision -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To compile and run the project, issue the following command in the root directory: - -.. tab-set:: - - .. tab-item:: Linux - - ``./gradlew run`` - - .. tab-item:: macOS - - ``./gradlew run`` - - .. tab-item:: Windows (cmd) - - ``gradlew run`` - -Running the following command under the root directory will build the jar under ``photon-server/build/libs``: - -.. tab-set:: - - .. tab-item:: Linux - - ``./gradlew shadowJar`` - - .. tab-item:: macOS - - ``./gradlew shadowJar`` - - .. tab-item:: Windows (cmd) - - ``gradlew shadowJar`` - -Build and Run PhotonVision on a Raspberry Pi Coprocessor -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -As a convenience, the build has a built-in `deploy` command which builds, deploys, and starts the current source code on a coprocessor. - -An architecture override is required to specify the deploy target's architecture. - -.. tab-set:: - - .. tab-item:: Linux - - ``./gradlew clean`` - - ``./gradlew deploy -PArchOverride=linuxarm64`` - - .. tab-item:: macOS - - ``./gradlew clean`` - - ``./gradlew deploy -PArchOverride=linuxarm64`` - - .. tab-item:: Windows (cmd) - - ``gradlew clean`` - - ``gradlew deploy -PArchOverride=linuxarm64`` - -The ``deploy`` command is tested against Raspberry Pi coprocessors. Other similar coprocessors may work too. - -Using PhotonLib Builds -~~~~~~~~~~~~~~~~~~~~~~ - -The build process includes the following task: - -.. tab-set:: - - .. 
tab-item:: Linux - - ``./gradlew generateVendorJson`` - - .. tab-item:: macOS - - ``./gradlew generateVendorJson`` - - .. tab-item:: Windows (cmd) - - ``gradlew generateVendorJson`` - -This generates a vendordep JSON of your local build at ``photon-lib/build/generated/vendordeps/photonlib.json``. - -The photonlib source can be published to your local maven repository after building: - -.. tab-set:: - - .. tab-item:: Linux - - ``./gradlew publishToMavenLocal`` - - .. tab-item:: macOS - - ``./gradlew publishToMavenLocal`` - - .. tab-item:: Windows (cmd) - - ``gradlew publishToMavenLocal`` - -After adding the generated vendordep to your project, add the following to your project's ``build.gradle`` under the ``plugins {}`` block. - -.. code-block:: Java - - repositories { - mavenLocal() - } - - -Debugging PhotonVision Running Locally -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -One way is by running the program using gradle with the :code:`--debug-jvm` flag. Run the program with :code:`./gradlew run --debug-jvm`, and attach to it with VSCode by adding the following to :code:`launch.json`. Note args can be passed with :code:`--args="foobar"`. - -.. code-block:: - - { - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. - // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "type": "java", - "name": "Attach to Remote Program", - "request": "attach", - "hostName": "localhost", - "port": "5005", - "projectName": "photon-core", - } - ] - } - -PhotonVision can also be run using the gradle tasks plugin with :code:`"args": "--debug-jvm"` added to launch.json. - - -Debugging PhotonVision Running on a CoProcessor -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Set up a VSCode configuration in :code:`launch.json` - -.. code-block:: - - { - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. - // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "type": "java", - "name": "Attach to CoProcessor", - "request": "attach", - "hostName": "photonvision.local", - "port": "5801", - "projectName": "photon-core" - }, - ] - } - -Stop any existing instance of PhotonVision. - -Launch the program with the following additional argument to the JVM: :code:`java -jar -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5801 photonvision.jar` - -Once the program says it is listening on port 5801, launch the debug configuration in VSCode. - -The program will wait for the VSCode debugger to attach before proceeding. - -Running examples -~~~~~~~~~~~~~~~~ - -You can run one of the many built in examples straight from the command line, too! They contain a fully featured robot project, and some include simulation support. The projects can be found inside the photonlib-java-examples and photonlib-cpp-examples subdirectories, respectively. The projects currently available include: - -- photonlib-java-examples: - - aimandrange:simulateJava - - aimattarget:simulateJava - - getinrange:simulateJava - - simaimandrange:simulateJava - - simposeest:simulateJava -- photonlib-cpp-examples: - - aimandrange:simulateNative - - getinrange:simulateNative - -To run them, use the commands listed below. Photonlib must first be published to your local maven repository, then the copyPhotonlib task will copy the generated vendordep json file into each example. 
After that, the simulateJava/simulateNative task can be used like a normal robot project. Robot simulation with attached debugger is technically possible by using simulateExternalJava and modifying the launch script it exports, though unsupported. - -.. code-block:: - - ~/photonvision$ ./gradlew publishToMavenLocal - - ~/photonvision$ cd photonlib-java-examples - ~/photonvision/photonlib-java-examples$ ./gradlew copyPhotonlib - ~/photonvision/photonlib-java-examples$ ./gradlew :simulateJava - - ~/photonvision$ cd photonlib-cpp-examples - ~/photonvision/photonlib-cpp-examples$ ./gradlew copyPhotonlib - ~/photonvision/photonlib-cpp-examples$ ./gradlew :simulateNative diff --git a/docs/source/docs/contributing/photonvision/index.rst b/docs/source/docs/contributing/photonvision/index.rst deleted file mode 100644 index 93185e0641..0000000000 --- a/docs/source/docs/contributing/photonvision/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Contributing to PhotonVision -============================ - -.. toctree:: - - build-instructions - top-contributors diff --git a/docs/source/docs/contributing/photonvision/top-contributors.rst b/docs/source/docs/contributing/photonvision/top-contributors.rst deleted file mode 100644 index 9c27a60562..0000000000 --- a/docs/source/docs/contributing/photonvision/top-contributors.rst +++ /dev/null @@ -1,5 +0,0 @@ -Top Contributors -================ - -.. ghcontributors:: PhotonVision/photonvision - :limit: 10 diff --git a/docs/source/docs/description.rst b/docs/source/docs/description.md similarity index 57% rename from docs/source/docs/description.rst rename to docs/source/docs/description.md index a75ab7b71e..2a962188e8 100644 --- a/docs/source/docs/description.rst +++ b/docs/source/docs/description.md @@ -1,47 +1,46 @@ -About PhotonVision -================== +# About PhotonVision -Description -^^^^^^^^^^^ -PhotonVision is a free, fast, and easy-to-use vision processing solution for the *FIRST*\ Robotics Competition. PhotonVision is designed to get vision working on your robot *quickly*, without the significant cost of other similar solutions. -Using PhotonVision, teams can go from setting up a camera and coprocessor to detecting and tracking targets by simply tuning sliders. With an easy to use interface, comprehensive documentation, and a feature rich vendor dependency, no experience is necessary to use PhotonVision. No matter your resources, using PhotonVision is easy compared to its alternatives. +## Description + +PhotonVision is a free, fast, and easy-to-use vision processing solution for the *FIRST*Robotics Competition. PhotonVision is designed to get vision working on your robot *quickly*, without the significant cost of other similar solutions. +Using PhotonVision, teams can go from setting up a camera and coprocessor to detecting and tracking AprilTags and other targets by simply tuning sliders. With an easy to use interface, comprehensive documentation, and a feature rich vendor dependency, no experience is necessary to use PhotonVision. No matter your resources, using PhotonVision is easy compared to its alternatives. + +## Advantages -Advantages -^^^^^^^^^^ PhotonVision has a myriad of advantages over similar solutions, including: -Affordable ----------- -Compared to alternatives, PhotonVision is much cheaper to use (at the cost of your coprocessor and camera) compared to alternatives that cost $400. This allows your team to save money while still being competitive. 
+### Affordable + +PhotonVision is much cheaper to use (at the cost of your coprocessor and camera) than alternatives that cost \$400. This allows your team to save money while still being competitive. + +### Easy to Use User Interface -Easy to Use User Interface -------------------------- The PhotonVision user interface is simple and modular, making things easier for the user. With a simpler interface, you can focus on what matters most, tracking targets, rather than how to use our UI. A major unique quality is that the PhotonVision UI includes an offline copy of our documentation for your ease of access at competitions. -PhotonLib Vendor Dependency --------------------------- +### PhotonLib Vendor Dependency + The PhotonLib vendor dependency allows you to easily get necessary target data (without having to work directly with NetworkTables) while also providing utility methods to get distance and position on the field. This helps your team focus less on getting data and more on using it to do cool things. This also has the benefit of having a structure that ensures all data is from the same timestamp, which is helpful for latency compensation. -User Calibration ---------------- +### User Calibration + Using PhotonVision allows the user to calibrate for their specific camera, which will get you the best tracking results. This is extremely important as every camera (even if it is the same model) will have its own quirks and user calibration allows for those to be accounted for. -High FPS Processing ------------------- +### High FPS Processing + Compared to alternative solutions, PhotonVision boasts higher frames per second which allows for a smoother video stream and detection of targets to ensure you aren't losing out on any performance. -Low Latency ----------- +### Low Latency + PhotonVision provides low latency processing to make sure you get vision measurements as fast as possible, which makes complex vision tasks easier. We guarantee that all measurements are sent from the same timestamp, making life easier for your programmers. -Fully Open Source and Active Developer Community ------------------------------------------------ -You can find all of our code on `GitHub `_, including code for our main program, documentation, vendor dependency (PhotonLib), and more. This helps you see everything working behind the scenes and increases transparency. This also allows users to make pull requests for features that they want to add in to PhotonVision that will be reviewed by the development team. PhotonVision is licensed under the GNU General Public License (GPLv3) which you can learn more about `here `_. +### Fully Open Source and Active Developer Community + +You can find all of our code on [GitHub](https://github.com/PhotonVision), including code for our main program, documentation, vendor dependency (PhotonLib), and more. This helps you see everything working behind the scenes and increases transparency. This also allows users to make pull requests for features that they want to add in to PhotonVision that will be reviewed by the development team. PhotonVision is licensed under the GNU General Public License (GPLv3) which you can learn more about [here](https://www.gnu.org/licenses/quick-guide-gplv3.html). + +### Multi-Camera Support -Multi-Camera Support -------------------- You can use multiple cameras within PhotonVision, allowing you to see multiple angles without the need to buy multiple coprocessors.
This makes vision processing more affordable and simpler for your team. -Comprehensive Documentation ---------------------------- +### Comprehensive Documentation + Using our comprehensive documentation, you will be able to easily start vision processing by following a series of simple steps. diff --git a/docs/source/docs/examples/aimandrange.md b/docs/source/docs/examples/aimandrange.md new file mode 100644 index 0000000000..556da387b0 --- /dev/null +++ b/docs/source/docs/examples/aimandrange.md @@ -0,0 +1,50 @@ +# Combining Aiming and Getting in Range + +The following example is from the PhotonLib example repository ([Java](https://github.com/PhotonVision/photonvision/tree/master/photonlib-java-examples/aimandrange)/[C++](https://github.com/PhotonVision/photonvision/tree/master/photonlib-cpp-examples/aimandrange)). + +## Knowledge and Equipment Needed + +- Everything required in {ref}`Aiming at a Target `. + +## Code + +Now that you know how to aim toward the AprilTag, let's also drive the correct distance from the AprilTag. + +To do this, we'll use the *pitch* of the target in the camera image and trigonometry to figure out how far away the robot is from the AprilTag. Then, like before, we'll use the P term of a PID controller to drive the robot to the correct distance. + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Java + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-java-examples/aimandrange/src/main/java/frc/robot/Robot.java + :language: java + :lines: 84-131 + :linenos: + :lineno-start: 84 + + .. tab-item:: C++ (Header) + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/aimandrange/src/main/include/Robot.h + :language: c++ + :lines: 25-63 + :linenos: + :lineno-start: 25 + + .. tab-item:: C++ (Source) + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/aimandrange/src/main/cpp/Robot.cpp + :language: c++ + :lines: 58-107 + :linenos: + :lineno-start: 58 + + .. tab-item:: Python + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-python-examples/aimandrange/robot.py + :language: python + :lines: 44-98 + :linenos: + :lineno-start: 44 + +``` diff --git a/docs/source/docs/examples/aimandrange.rst b/docs/source/docs/examples/aimandrange.rst deleted file mode 100644 index 9d3924f3a0..0000000000 --- a/docs/source/docs/examples/aimandrange.rst +++ /dev/null @@ -1,41 +0,0 @@ -Combining Aiming and Getting in Range -===================================== - - -The following example is from the PhotonLib example repository (`Java `_/`C++ `_). - -Knowledge and Equipment Needed ------------------------------------------------ - -- Everything required in :ref:`Aiming at a Target ` and :ref:`Getting in Range of the Target `. - -Code -------- - -Now that you know how to both aim and get in range of the target, it is time to combine them both at the same time. This example will take the previous two code examples and make them into one function using the same tools as before. With this example, you now have all the knowledge you need to use PhotonVision on your robot in any game. - -.. tab-set:: - - .. tab-item:: Java - - .. 
rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/aimandrange/src/main/java/frc/robot/Robot.java - :language: java - :lines: 42-111 - :linenos: - :lineno-start: 42 - - .. tab-item:: C++ (Header) - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/aimandrange/src/main/include/Robot.h - :language: cpp - :lines: 27-71 - :linenos: - :lineno-start: 27 - - .. tab-item:: C++ (Source) - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/aimandrange/src/main/cpp/Robot.cpp - :language: cpp - :lines: 25-67 - :linenos: - :lineno-start: 25 diff --git a/docs/source/docs/examples/aimingatatarget.md b/docs/source/docs/examples/aimingatatarget.md new file mode 100644 index 0000000000..b275baefa1 --- /dev/null +++ b/docs/source/docs/examples/aimingatatarget.md @@ -0,0 +1,55 @@ +# Aiming at a Target + +The following example is from the PhotonLib example repository ([Java](https://github.com/PhotonVision/photonvision/tree/master/photonlib-java-examples/aimattarget)). + +## Knowledge and Equipment Needed + +- A Robot +- A camera mounted rigidly to the robot's frame, cenetered and pointed forward. +- A coprocessor running PhotonVision with an AprilTag or Aurco 2D Pipeline. +- [A printout of Apriltag 7](https://firstfrc.blob.core.windows.net/frc2024/FieldAssets/Apriltag_Images_and_User_Guide.pdf), mounted on a rigid and flat surface. + +## Code + +Now that you have properly set up your vision system and have tuned a pipeline, you can now aim your robot at an AprilTag using the data from PhotonVision. The *yaw* of the target is the critical piece of data that will be needed first. + +Yaw is reported to the roboRIO over Network Tables. PhotonLib, our vender dependency, is the easiest way to access this data. The documentation for the Network Tables API can be found {ref}`here ` and the documentation for PhotonLib {ref}`here `. + +In this example, while the operator holds a button down, the robot will turn towards the AprilTag using the P term of a PID loop. To learn more about how PID loops work, how WPILib implements them, and more, visit [Advanced Controls (PID)](https://docs.wpilib.org/en/stable/docs/software/advanced-control/introduction/index.html) and [PID Control in WPILib](https://docs.wpilib.org/en/stable/docs/software/advanced-controls/controllers/pidcontroller.html#pid-control-in-wpilib). + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Java + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-java-examples/aimattarget/src/main/java/frc/robot/Robot.java + :language: java + :lines: 77-117 + :linenos: + :lineno-start: 77 + + .. tab-item:: C++ (Header) + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/aimattarget/src/main/include/Robot.h + :language: c++ + :lines: 25-60 + :linenos: + :lineno-start: 25 + + .. tab-item:: C++ (Source) + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/aimattarget/src/main/cpp/Robot.cpp + :language: c++ + :lines: 56-96 + :linenos: + :lineno-start: 56 + + .. tab-item:: Python + + .. 
rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-python-examples/aimattarget/robot.py + :language: python + :lines: 46-70 + :linenos: + :lineno-start: 46 + +``` diff --git a/docs/source/docs/examples/aimingatatarget.rst b/docs/source/docs/examples/aimingatatarget.rst deleted file mode 100644 index 53c077f9d9..0000000000 --- a/docs/source/docs/examples/aimingatatarget.rst +++ /dev/null @@ -1,46 +0,0 @@ -Aiming at a Target -================== - -The following example is from the PhotonLib example repository (`Java `_/`C++ `_). - -Knowledge and Equipment Needed ------------------------------- - -- Robot with a vision system running PhotonVision -- Target -- Ability to track a target by properly tuning a pipeline - -Code -------- - -Now that you have properly set up your vision system and have tuned a pipeline, you can now aim your robot/turret at the target using the data from PhotonVision. This data is reported over NetworkTables and includes: latency, whether there is a target detected or not, pitch, yaw, area, skew, and target pose relative to the robot. This data will be used/manipulated by our vendor dependency, PhotonLib. The documentation for the Network Tables API can be found :ref:`here ` and the documentation for PhotonLib :ref:`here `. - -For this simple example, only yaw is needed. - -In this example, while the operator holds a button down, the robot will turn towards the goal using the P term of a PID loop. To learn more about how PID loops work, how WPILib implements them, and more, visit `Advanced Controls (PID) `_ and `PID Control in WPILib `_. - -.. tab-set:: - - .. tab-item:: Java - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/aimattarget/src/main/java/frc/robot/Robot.java - :language: java - :lines: 41-98 - :linenos: - :lineno-start: 41 - - .. tab-item:: C++ (Header) - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/aimattarget/src/main/include/Robot.h - :language: c++ - :lines: 27-53 - :linenos: - :lineno-start: 27 - - .. tab-item:: C++ (Source) - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/aimattarget/src/main/cpp/Robot.cpp - :language: c++ - :lines: 25-52 - :linenos: - :lineno-start: 25 diff --git a/docs/source/docs/examples/gettinginrangeofthetarget.rst b/docs/source/docs/examples/gettinginrangeofthetarget.rst deleted file mode 100644 index 2e79455958..0000000000 --- a/docs/source/docs/examples/gettinginrangeofthetarget.rst +++ /dev/null @@ -1,54 +0,0 @@ -Getting in Range of the Target -============================== - -The following example is from the PhotonLib example repository (`Java `_/`C++ `_). - - -Knowledge and Equipment Needed ------------------------------------------------ - -- Everything required in :ref:`Aiming at a Target `. -- Large space where your robot can move around freely - -Code -------- - -In FRC, a mechanism usually has to be a certain distance away from its target in order to be effective and score. In the previous example, we showed how to aim your robot at the target. Now we will show how to move to a certain distance from the target. - -For proper functionality of just this example, ensure that your robot is pointed towards the target. 
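
As a condensed illustration of the behaviour described in the aiming and aim-and-range examples above (turn toward the tag using its yaw, and drive to a set distance using its pitch plus trigonometry), a rough sketch is shown below. All constants, names, and signs here are placeholders for illustration only, not the linked example projects' actual code; it assumes a recent PhotonLib (`PhotonCamera`, `PhotonUtils`) and a drivetrain driven elsewhere with the returned speeds.

```java
// Illustrative sketch only -- not the example repository's source.
import org.photonvision.PhotonCamera;
import org.photonvision.PhotonUtils;
import org.photonvision.targeting.PhotonPipelineResult;
import edu.wpi.first.math.util.Units;

public class AimAndRangeSketch {
    // Placeholder constants; measure these on your own robot and field element.
    static final double CAMERA_HEIGHT_METERS = Units.inchesToMeters(24);
    static final double TARGET_HEIGHT_METERS = Units.inchesToMeters(57);   // AprilTag center height
    static final double CAMERA_PITCH_RADIANS = Units.degreesToRadians(15); // upward camera tilt
    static final double GOAL_RANGE_METERS = Units.feetToMeters(3);
    static final double KP_TURN = 0.02;
    static final double KP_FORWARD = 0.6;

    PhotonCamera camera = new PhotonCamera("photonvision");

    /** Returns {forwardSpeed, turnSpeed} for an arcade drive while the aim button is held. */
    double[] aimAndRange() {
        PhotonPipelineResult result = camera.getLatestResult();
        if (!result.hasTargets()) {
            return new double[] {0.0, 0.0}; // no tag visible: stop
        }
        var target = result.getBestTarget();

        // P term driving the tag's yaw (degrees) to zero. Flip the sign if your
        // drivetrain turns the wrong way.
        double turnSpeed = -KP_TURN * target.getYaw();

        // Estimate range from the tag's pitch, then drive the range error to zero.
        double rangeMeters = PhotonUtils.calculateDistanceToTargetMeters(
                CAMERA_HEIGHT_METERS,
                TARGET_HEIGHT_METERS,
                CAMERA_PITCH_RADIANS,
                Units.degreesToRadians(target.getPitch()));
        double forwardSpeed = KP_FORWARD * (rangeMeters - GOAL_RANGE_METERS);

        return new double[] {forwardSpeed, turnSpeed};
    }
}
```
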
- -While the operator holds down a button, the robot will drive towards the target and get in range. - -This example uses P term of the PID loop and PhotonLib and the distance function of PhotonUtils. - -.. warning:: The PhotonLib utility to calculate distance depends on the camera being at a different vertical height than the target. If this is not the case, a different method for estimating distance, such as target width or area, should be used. In general, this method becomes more accurate as range decreases and as the height difference increases. - -.. note:: There is no strict minimum delta-height necessary for this method to be applicable, just a requirement that a delta exists. - -.. tab-set:: - - .. tab-item:: Java - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/getinrange/src/main/java/frc/robot/Robot.java - :language: java - :lines: 42-107 - :linenos: - :lineno-start: 42 - - .. tab-item:: C++ (Header) - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/getinrange/src/main/include/Robot.h - :language: c++ - :lines: 27-67 - :linenos: - :lineno-start: 27 - - .. tab-item:: C++ (Source) - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/getinrange/src/main/cpp/Robot.cpp - :language: c++ - :lines: 25-58 - :linenos: - :lineno-start: 25 - -.. hint:: The accuracy of the measurement of the camera's pitch (:code:`CAMERA_PITCH_RADIANS` in the above example), as well as the camera's FOV, will determine the overall accuracy of this method. diff --git a/docs/source/docs/examples/images/poseest_demo.gif b/docs/source/docs/examples/images/poseest_demo.gif new file mode 100644 index 0000000000..5326725375 Binary files /dev/null and b/docs/source/docs/examples/images/poseest_demo.gif differ diff --git a/docs/source/docs/examples/index.md b/docs/source/docs/examples/index.md new file mode 100644 index 0000000000..501b1b535b --- /dev/null +++ b/docs/source/docs/examples/index.md @@ -0,0 +1,9 @@ +# Code Examples + +```{toctree} +:maxdepth: 1 + +aimingatatarget +aimandrange +poseest +``` diff --git a/docs/source/docs/examples/index.rst b/docs/source/docs/examples/index.rst deleted file mode 100644 index b7407489d1..0000000000 --- a/docs/source/docs/examples/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -Code Examples -============= - -.. toctree:: - :maxdepth: 1 - - aimingatatarget - gettinginrangeofthetarget - aimandrange - simaimandrange - simposeest diff --git a/docs/source/docs/examples/poseest.md b/docs/source/docs/examples/poseest.md new file mode 100644 index 0000000000..3d10e6c875 --- /dev/null +++ b/docs/source/docs/examples/poseest.md @@ -0,0 +1,211 @@ +# Using WPILib Pose Estimation, Simulation, and PhotonVision Together + +The following example comes from the PhotonLib example repository ([Java](https://github.com/gerth2/photonvision/tree/master/photonlib-java-examples/poseest)/[C++](https://github.com/gerth2/photonvision/tree/master/photonlib-cpp-examples/poseest)/[Python](https://github.com/gerth2/photonvision/tree/master/photonlib-python-examples/poseest)). Full code is available at that links. + +## Knowledge and Equipment Needed + +- Everything required in {ref}`Combining Aiming and Getting in Range `, plus some familiarity with WPILib pose estimation functionality. 
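
If WPILib's pose estimators are unfamiliar, the rough shape of constructing one is sketched below. The kinematics, module positions, and standard deviations are placeholder values for illustration, not the example project's; see the WPILib documentation linked in the walkthrough for details.

```java
// Minimal construction sketch of a WPILib swerve pose estimator; all values are placeholders.
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.estimator.SwerveDrivePoseEstimator;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Translation2d;
import edu.wpi.first.math.kinematics.SwerveDriveKinematics;
import edu.wpi.first.math.kinematics.SwerveModulePosition;

class PoseEstimatorSketch {
    SwerveDriveKinematics kinematics = new SwerveDriveKinematics(
            new Translation2d(0.3, 0.3), new Translation2d(0.3, -0.3),
            new Translation2d(-0.3, 0.3), new Translation2d(-0.3, -0.3));

    SwerveModulePosition[] modulePositions = {
            new SwerveModulePosition(), new SwerveModulePosition(),
            new SwerveModulePosition(), new SwerveModulePosition()};

    // The last two arguments are the odometry and vision measurement standard
    // deviations (x, y, heading); larger numbers mean "trust this source less".
    SwerveDrivePoseEstimator poseEstimator = new SwerveDrivePoseEstimator(
            kinematics,
            new Rotation2d(),                 // current gyro angle
            modulePositions,                  // current module positions
            new Pose2d(),                     // starting pose
            VecBuilder.fill(0.1, 0.1, 0.1),   // odometry std devs
            VecBuilder.fill(0.9, 0.9, 0.9));  // vision measurement std devs
}
```
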
+ +## Background + +This example demonstrates integration of swerve drive control, a basic swerve physics simulation, and PhotonLib's simulated vision system functionality. + +## Walkthrough + +### Estimating Pose + +The {code}`Drivetrain` class includes functionality to fuse multiple sensor readings together (including PhotonVision) into a best-guess of the pose on the field. + +Please reference the [WPILib documentation](https://docs.wpilib.org/en/stable/docs/software/advanced-controls/state-space/state-space-pose_state-estimators.html) on using the {code}`SwerveDrivePoseEstimator` class. + +We use the 2024 game's AprilTag Locations: + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Java + :sync: java + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java + :language: java + :lines: 68-68 + :linenos: + :lineno-start: 68 + + .. tab-item:: C++ + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/poseest/src/main/include/Constants.h + :language: c++ + :lines: 42-43 + :linenos: + :lineno-start: 42 + + .. tab-item:: Python + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-python-examples/poseest/robot.py + :language: python + :lines: 46-46 + :linenos: + :lineno-start: 46 + +``` + + + +To incorporate PhotonVision, we need to create a {code}`PhotonCamera`: + + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Java + :sync: java + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java + :language: java + :lines: 57-57 + :linenos: + :lineno-start: 57 + + .. tab-item:: C++ + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/poseest/src/main/include/Vision.h + :language: c++ + :lines: 145-145 + :linenos: + :lineno-start: 145 + + .. tab-item:: Python + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-python-examples/poseest/robot.py + :language: python + :lines: 44-44 + :linenos: + :lineno-start: 44 +``` + +During periodic execution, we read back camera results. If we see AprilTags in the image, we calculate the camera-measured pose of the robot and pass it to the {code}`Drivetrain`. + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Java + :sync: java + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-java-examples/poseest/src/main/java/frc/robot/Robot.java + :language: java + :lines: 64-74 + :linenos: + :lineno-start: 64 + + .. tab-item:: C++ + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/poseest/src/main/cpp/Robot.cpp + :language: c++ + :lines: 38-46 + :linenos: + :lineno-start: 38 + + .. tab-item:: Python + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-python-examples/poseest/robot.py + :language: python + :lines: 54-56 + :linenos: + :lineno-start: 54 + +``` + +### Simulating the Camera + +First, we create a new {code}`VisionSystemSim` to represent our camera and coprocessor running PhotonVision, and moving around our simulated field. + +```{eval-rst} +.. 
tab-set:: + + .. tab-item:: Java + :sync: java + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java + :language: java + :lines: 65-69 + :linenos: + :lineno-start: 65 + + .. tab-item:: C++ + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/poseest/src/main/include/Vision.h + :language: c++ + :lines: 49-52 + :linenos: + :lineno-start: 49 + + .. tab-item:: Python + + # Coming Soon! + +``` + +Then, we add configure the simulated vision system to match the camera system being simulated. + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Java + :sync: java + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java + :language: java + :lines: 69-82 + :linenos: + :lineno-start: 69 + + .. tab-item:: C++ + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/poseest/src/main/include/Vision.h + :language: c++ + :lines: 53-65 + :linenos: + :lineno-start: 53 + + .. tab-item:: Python + + # Coming Soon! +``` + + +### Updating the Simulated Vision System + +During simulation, we periodically update the simulated vision system. + +```{eval-rst} +.. tab-set:: + + .. tab-item:: Java + :sync: java + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-java-examples/poseest/src/main/java/frc/robot/Robot.java + :language: java + :lines: 114-132 + :linenos: + :lineno-start: 114 + + .. tab-item:: C++ + + .. rli:: https://raw.githubusercontent.com/gerth2/photonvision/adb3098fbe0cdbc1a378c6d5a41126dd1d6d6955/photonlib-cpp-examples/poseest/src/main/cpp/Robot.cpp + :language: c++ + :lines: 95-109 + :linenos: + :lineno-start: 95 + + .. tab-item:: Python + + # Coming Soon! +``` + +The rest is done behind the scenes. + +```{image} images/poseest_demo.gif +:alt: Simulated swerve drive and vision system working together in teleoperated mode. +:width: 1200 +``` diff --git a/docs/source/docs/examples/simaimandrange.rst b/docs/source/docs/examples/simaimandrange.rst deleted file mode 100644 index db20413a16..0000000000 --- a/docs/source/docs/examples/simaimandrange.rst +++ /dev/null @@ -1,94 +0,0 @@ -Simulating Aiming and Getting in Range -====================================== - -The following example comes from the PhotonLib example repository (`Java `_/`C++ `_). Full code is available at those links. - - -Knowledge and Equipment Needed ------------------------------------------------ - -- Everything required in :ref:`Combining Aiming and Getting in Range `. - -Background ----------- - -The previous examples show how to run PhotonVision on a real robot, with a physical robot drivetrain moving around and interacting with the software. - -This example builds upon that, adding support for simulating robot motion and incorporating that motion into a :code:`SimVisionSystem`. This allows you to test control algorithms on your development computer, without requiring access to a real robot. - -.. raw:: html - - - -Walkthrough ------------ - -First, in the main :code:`Robot` source file, we add support to periodically update a new simulation-specific object. This logic only gets used while running in simulation: - -.. tab-set-code:: - - .. 
rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/Robot.java - :language: java - :lines: 118-128 - :linenos: - :lineno-start: 118 - -Then, we add in the implementation of our new `DrivetrainSim` class. Please reference the `WPILib documentation on physics simulation `_. - -Simulated Vision support is added with the following steps: - -Creating the Simulated Vision System -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -First, we create a new :code:`SimVisionSystem` to represent our camera and coprocessor running PhotonVision. - -.. tab-set-code:: - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java - :language: java - :lines: 73-93 - :linenos: - :lineno-start: 72 - -Next, we create objects to represent the physical location and size of the vision targets we are calibrated to detect. This example models the down-field high goal vision target from the 2020 and 2021 games. - -.. tab-set-code:: - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java - :language: java - :lines: 95-111 - :linenos: - :lineno-start: 95 - -Finally, we add our target to the simulated vision system. - -.. tab-set-code:: - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java - :language: java - :lines: 116-117 - :linenos: - :lineno-start: 113 - - -If you have additional targets you want to detect, you can add them in the same way as the first one. - - -Updating the Simulated Vision System -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Once we have all the properties of our simulated vision system defined, the work to do at runtime becomes very minimal. Simply pass in the robot's pose periodically to the simulated vision system. - -.. tab-set-code:: - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java - :language: java - :lines: 124-142 - :linenos: - :lineno-start: 122 - - -The rest is done behind the scenes. diff --git a/docs/source/docs/examples/simposeest.rst b/docs/source/docs/examples/simposeest.rst deleted file mode 100644 index b1d0a2ec84..0000000000 --- a/docs/source/docs/examples/simposeest.rst +++ /dev/null @@ -1,129 +0,0 @@ -Using WPILib Pose Estimation, Simulation, and PhotonVision Together -=================================================================== - -The following example comes from the PhotonLib example repository (`Java `_). Full code is available at that links. - -Knowledge and Equipment Needed ------------------------------------------------ - -- Everything required in :ref:`Combining Aiming and Getting in Range `, plus some familiarity with WPILib pose estimation functionality. - -Background ----------- - -This example builds upon WPILib's `Differential Drive Pose Estimator `_. It adds a :code:`PhotonCamera` to gather estimates of the robot's position on the field. This in turn can be used for aligning with vision targets, and increasing accuracy of autonomous routines. 
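
The fusion step is the same idea in both the removed differential-drive example and the new swerve example: each vision-derived robot pose is passed, with its capture timestamp, into the WPILib pose estimator. A rough sketch of that periodic step is shown below; it assumes a PhotonLib version whose `PhotonPoseEstimator.update()` returns an `Optional<EstimatedRobotPose>` (the exact signature varies between releases), and the estimator objects are assumed to be configured elsewhere.

```java
// Illustrative fusion of a PhotonVision pose estimate into a WPILib pose estimator.
// Check the PhotonLib javadocs for your release; update() signatures differ by version.
import java.util.Optional;
import org.photonvision.EstimatedRobotPose;
import org.photonvision.PhotonPoseEstimator;
import edu.wpi.first.math.estimator.SwerveDrivePoseEstimator;

class VisionFusionSketch {
    PhotonPoseEstimator photonEstimator;         // configured elsewhere (field layout, camera, transform)
    SwerveDrivePoseEstimator drivePoseEstimator; // the drivetrain's estimator

    /** Call from the robot's periodic loop. */
    void updateVision() {
        Optional<EstimatedRobotPose> visionEstimate = photonEstimator.update();
        visionEstimate.ifPresent(est ->
                // Feed the camera-measured pose and its capture timestamp into the filter.
                drivePoseEstimator.addVisionMeasurement(
                        est.estimatedPose.toPose2d(), est.timestampSeconds));
    }
}
```
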
- -To support simulation, a :code:`SimVisionSystem` is used to drive data into the :code:`PhotonCamera`. The far high goal target from 2020 is modeled. - -Walkthrough ------------ - -WPILib's :code:`Pose2d` class is used to represent robot positions on the field. - -Three different :code:`Pose2d` positions are relevant for this example: - -1) Desired Pose: The location some autonomous routine wants the robot to be in. -2) Estimated Pose: The location the software `believes` the robot to be in, based on physics models and sensor feedback. -3) Actual Pose: The locations the robot is actually at. The physics simulation generates this in simulation, but it cannot be directly measured on the real robot. - -Estimating Pose -^^^^^^^^^^^^^^^ - -The :code:`DrivetrainPoseEstimator` class is responsible for generating an estimated robot pose using sensor readings (including PhotonVision). - -Please reference the `WPILib documentation `_ on using the :code:`DifferentialDrivePoseEstimator` class. - -For both simulation and on-robot code, we create objects to represent the physical location and size of the vision targets we are calibrated to detect. This example models the down-field high goal vision target from the 2020 and 2021 games. - -.. tab-set:: - - .. tab-item:: Java - :sync: java - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/Constants.java - :language: java - :lines: 83-106 - :linenos: - :lineno-start: 83 - - -To incorporate PhotonVision, we need to create a :code:`PhotonCamera`: - -.. tab-set:: - - .. tab-item:: Java - :sync: java - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainPoseEstimator.java - :language: java - :lines: 46 - :linenos: - :lineno-start: 46 - -During periodic execution, we read back camera results. If we see a target in the image, we pass the camera-measured pose of the robot to the :code:`DifferentialDrivePoseEstimator`. - -.. tab-set:: - - .. tab-item:: Java - :sync: java - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainPoseEstimator.java - :language: java - :lines: 81-92 - :linenos: - :lineno-start: 81 - - -That's it! - -Simulating the Camera -^^^^^^^^^^^^^^^^^^^^^ - -First, we create a new :code:`SimVisionSystem` to represent our camera and coprocessor running PhotonVision. - -.. tab-set:: - - .. tab-item:: Java - :sync: java - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainSim.java - :language: java - :lines: 76-95 - :linenos: - :lineno-start: 76 - - -Then, we add our target to the simulated vision system. - -.. tab-set:: - - .. tab-item:: Java - :sync: java - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainSim.java - :lines: 97-99 - :linenos: - :lineno-start: 97 - - -If you have additional targets you want to detect, you can add them in the same way as the first one. 
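
The `SimVisionSystem` used in this removed example has been superseded in the updated poseest example by `VisionSystemSim` and `PhotonCameraSim`. A rough setup sketch of that newer simulation flow is shown below; the camera properties, mounting transform, and field layout choice are placeholders, so consult the PhotonLib simulation javadocs for the exact API in your release.

```java
// Rough sketch of the newer PhotonLib simulation classes; values are placeholders.
import org.photonvision.PhotonCamera;
import org.photonvision.simulation.PhotonCameraSim;
import org.photonvision.simulation.SimCameraProperties;
import org.photonvision.simulation.VisionSystemSim;
import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.apriltag.AprilTagFields;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.geometry.Translation3d;

class VisionSimSketch {
    PhotonCamera camera = new PhotonCamera("photonvision");
    VisionSystemSim visionSim = new VisionSystemSim("main");

    VisionSimSketch() {
        // Populate the simulated field with the season's AprilTags.
        AprilTagFieldLayout layout = AprilTagFields.k2024Crescendo.loadAprilTagLayoutField();
        visionSim.addAprilTags(layout);

        // Approximate the real camera: resolution, diagonal FOV, frame rate, latency.
        SimCameraProperties props = new SimCameraProperties();
        props.setCalibration(640, 480, Rotation2d.fromDegrees(70));
        props.setFPS(30);
        props.setAvgLatencyMs(35);

        // Attach the simulated camera at its mounting position on the robot.
        PhotonCameraSim cameraSim = new PhotonCameraSim(camera, props);
        Transform3d robotToCamera = new Transform3d(
                new Translation3d(0.3, 0.0, 0.2),
                new Rotation3d(0, Math.toRadians(-15), 0));
        visionSim.addCamera(cameraSim, robotToCamera);
    }

    /** Call from simulationPeriodic() with the drivetrain's simulated ground-truth pose. */
    void simulationPeriodic(Pose2d simulatedRobotPose) {
        visionSim.update(simulatedRobotPose);
    }
}
```
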
- - -Updating the Simulated Vision System -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Once we have all the properties of our simulated vision system defined, the remaining work is minimal. Periodically, pass in the robot's pose to the simulated vision system. - -.. tab-set:: - - .. tab-item:: Java - :sync: java - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainSim.java - :language: java - :lines: 138-139 - :linenos: - :lineno-start: 138 - - -The rest is done behind the scenes. diff --git a/docs/source/docs/hardware/customhardware.rst b/docs/source/docs/hardware/customhardware.md similarity index 61% rename from docs/source/docs/hardware/customhardware.rst rename to docs/source/docs/hardware/customhardware.md index b43d8a0753..43252b7f84 100644 --- a/docs/source/docs/hardware/customhardware.rst +++ b/docs/source/docs/hardware/customhardware.md @@ -1,18 +1,16 @@ -Deploying on Custom Hardware -============================ +# Deploying on Custom Hardware -Configuration -------------- +## Configuration By default, PhotonVision attempts to make minimal assumptions of the hardware it runs on. However, it may be configured to enable custom LED control, branding, and other functionality. -``hardwareConfig.json`` is the location for this configuration. It is included when settings are exported, and can be uploaded as part of a .zip, or on its own. +`hardwareConfig.json` is the location for this configuration. It is included when settings are exported, and can be uploaded as part of a .zip, or on its own. -LED Support ------------ +## LED Support -For Raspberry-Pi based hardware, PhotonVision can use `PiGPIO `_ to control IO pins. The mapping of which pins control which LED's is part of the hardware config. The pins are active-high: set high when LED's are commanded on, and set low when commanded off. +For Raspberry-Pi based hardware, PhotonVision can use [PiGPIO](https://abyz.me.uk/rpi/pigpio/) to control IO pins. The mapping of which pins control which LED's is part of the hardware config. The pins are active-high: set high when LED's are commanded on, and set low when commanded off. +```{eval-rst} .. tab-set-code:: .. code-block:: json @@ -27,16 +25,19 @@ For Raspberry-Pi based hardware, PhotonVision can use `PiGPIO `_ for the commands utilized. +:::{note} +These settings have no effect if PhotonVision detects it is running on a Raspberry Pi. See [the MetricsBase class](https://github.com/PhotonVision/photonvision/blob/dbd631da61b7c86b70fa6574c2565ad57d80a91a/photon-core/src/main/java/org/photonvision/common/hardware/metrics/MetricsBase.java) for the commands utilized. +::: -Known Camera FOV ----------------- +## Known Camera FOV If your hardware contains a camera with a known field of vision, it can be entered into the hardware configuration. This will prevent users from editing it in the GUI. +```{eval-rst} .. tab-set-code:: .. code-block:: json { "vendorFOV" : 98.9 } +``` -Cosmetic & Branding -------------------- +## Cosmetic & Branding To help differentiate your hardware from other solutions, some customization is allowed. +```{eval-rst} .. tab-set-code:: .. code-block:: json @@ -77,14 +82,17 @@ To help differentiate your hardware from other solutions, some customization is "deviceLogoPath" : "", "supportURL" : "https://cat-bounce.com/", } +``` -.. note:: Not all configuration is currently presented in the User Interface. 
Additional file uploads may be needed to support custom images. +:::{note} +Not all configuration is currently presented in the User Interface. Additional file uploads may be needed to support custom images. +::: -Example -------- +## Example -Here is a complete example ``hardwareConfig.json``: +Here is a complete example `hardwareConfig.json`: +```{eval-rst} .. tab-set-code:: .. code-block:: json @@ -110,3 +118,4 @@ Here is a complete example ``hardwareConfig.json``: "restartHardwareCommand" : "", "vendorFOV" : 72.5 } +``` diff --git a/docs/source/docs/hardware/index.md b/docs/source/docs/hardware/index.md new file mode 100644 index 0000000000..bb4a1a99f7 --- /dev/null +++ b/docs/source/docs/hardware/index.md @@ -0,0 +1,9 @@ +# Hardware Selection + +```{toctree} +:maxdepth: 2 + +selecting-hardware +picamconfig +customhardware +``` diff --git a/docs/source/docs/hardware/index.rst b/docs/source/docs/hardware/index.rst deleted file mode 100644 index 1e4a2f3bec..0000000000 --- a/docs/source/docs/hardware/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Hardware Selection -================== - -.. toctree:: - :maxdepth: 2 - - selecting-hardware - picamconfig - customhardware diff --git a/docs/source/docs/hardware/picamconfig.md b/docs/source/docs/hardware/picamconfig.md new file mode 100644 index 0000000000..4a2226abe5 --- /dev/null +++ b/docs/source/docs/hardware/picamconfig.md @@ -0,0 +1,58 @@ +# Pi Camera Configuration + +## Background + +The Raspberry Pi CSI Camera port is routed through and processed by the GPU. Since the GPU boots before the CPU, it must be configured properly for the attached camera. Additionally, this configuration cannot be changed without rebooting. + +The GPU is not always capable of detecting other cameras automatically. The file `/boot/config.txt` is parsed by the GPU at boot time to determine what camera, if any, is expected to be attached. This file must be updated for some cameras. + +:::{warning} +Incorrect camera configuration will cause the camera to not be detected. It looks exactly the same as if the camera was unplugged. +::: + +## Updating `config.txt` + +After flashing the pi image onto an SD card, open the `boot` segment in a file browser. + +:::{note} +Windows may report "There is a problem with this drive". This should be ignored. +::: + +Locate `config.txt` in the folder, and open it with your favorite text editor. + +```{image} images/bootConfigTxt.png +``` + +Within the file, find this block of text: + +``` +############################################################## +### PHOTONVISION CAM CONFIG +### Comment/Uncomment to change which camera is supported +### Picam V1, V2 or HQ: uncomment (remove leading # ) from camera_auto_detect=1, +### and comment out all following lines +### IMX290/327/OV9281/Any other cameras that require additional overlays: +### Comment out (add a # ) to camera_auto_detect=1, and uncomment the line for +### the sensor you're trying to user + +cameraAutoDetect=1 + +# dtoverlay=imx290,clock-frequency=74250000 +# dtoverlay=imx290,clock-frequency=37125000 +# dtoverlay=imx378 +# dtoverlay=ov9281 + +############################################################## +``` + +Remove the leading `#` character to uncomment the line associated with your camera. Add a `#` in front of other cameras. + +:::{warning} +Leave lines outside the PhotonVision Camera Config block untouched. They are necessary for proper raspberry pi functionality. +::: + +Save the file, close the editor, and eject the drive. 
The boot configuration should now be ready for your selected camera. + +## Additional Information + +See [the libcamera documentation](https://github.com/raspberrypi/documentation/blob/679fab721855a3e8f17aa51819e5c2a7c447e98d/documentation/asciidoc/computers/camera/rpicam_configuration.adoc) for more details on configuring cameras. diff --git a/docs/source/docs/hardware/picamconfig.rst b/docs/source/docs/hardware/picamconfig.rst deleted file mode 100644 index 085d795f1b..0000000000 --- a/docs/source/docs/hardware/picamconfig.rst +++ /dev/null @@ -1,55 +0,0 @@ -Pi Camera Configuration -======================= - -Background ----------- - -The Raspberry Pi CSI Camera port is routed through and processed by the GPU. Since the GPU boots before the CPU, it must be configured properly for the attached camera. Additionally, this configuration cannot be changed without rebooting. - -The GPU is not always capable of detecting other cameras automatically. The file ``/boot/config.txt`` is parsed by the GPU at boot time to determine what camera, if any, is expected to be attached. This file must be updated for some cameras. - -.. warning:: Incorrect camera configuration will cause the camera to not be detected. It looks exactly the same as if the camera was unplugged. - -Updating ``config.txt`` ------------------------ - -After flashing the pi image onto an SD card, open the ``boot`` segment in a file browser. - -.. note:: Windows may report "There is a problem with this drive". This should be ignored. - -Locate ``config.txt`` in the folder, and open it with your favorite text editor. - -.. image:: images/bootConfigTxt.png - -Within the file, find this block of text: - -.. code-block:: - - ############################################################## - ### PHOTONVISION CAM CONFIG - ### Comment/Uncomment to change which camera is supported - ### Picam V1, V2 or HQ: uncomment (remove leading # ) from camera_auto_detect=1, - ### and comment out all following lines - ### IMX290/327/OV9281/Any other cameras that require additional overlays: - ### Comment out (add a # ) to camera_auto_detect=1, and uncomment the line for - ### the sensor you're trying to user - - cameraAutoDetect=1 - - # dtoverlay=imx290,clock-frequency=74250000 - # dtoverlay=imx290,clock-frequency=37125000 - # dtoverlay=imx378 - # dtoverlay=ov9281 - - ############################################################## - -Remove the leading ``#`` character to uncomment the line associated with your camera. Add a ``#`` in front of other cameras. - -.. warning:: Leave lines outside the PhotonVision Camera Config block untouched. They are necessary for proper raspberry pi functionality. - -Save the file, close the editor, and eject the drive. The boot configuration should now be ready for your selected camera. - -Additional Information ----------------------- - -See `the libcamera documentation `_ for more details on configuring cameras. diff --git a/docs/source/docs/hardware/selecting-hardware.rst b/docs/source/docs/hardware/selecting-hardware.md similarity index 54% rename from docs/source/docs/hardware/selecting-hardware.rst rename to docs/source/docs/hardware/selecting-hardware.md index 82a59bcf10..9d26d0a321 100644 --- a/docs/source/docs/hardware/selecting-hardware.rst +++ b/docs/source/docs/hardware/selecting-hardware.md @@ -1,101 +1,96 @@ -Selecting Hardware -================== +# Selecting Hardware In order to use PhotonVision, you need a coprocessor and a camera. 
This page will help you select the right hardware for your team depending on your budget, needs, and experience. -Choosing a Coprocessor ----------------------- +## Choosing a Coprocessor -Minimum System Requirements -^^^^^^^^^^^^^^^^^^^^^^^^^^^ +### Minimum System Requirements -* Ubuntu 22.04 LTS or Windows 10/11 - * We don't recommend using Windows for anything except testing out the system on a local machine. -* CPU: ARM Cortex-A53 (the CPU on Raspberry Pi 3) or better -* At least 8GB of storage -* 2GB of RAM - * PhotonVision isn't very RAM intensive, but you'll need at least 2GB to run the OS and PhotonVision. -* The following IO: - * At least 1 USB or MIPI-CSI port for the camera - * Note that we only support using the Raspberry Pi's MIPI-CSI port, other MIPI-CSI ports from other coprocessors may not work. - * Ethernet port for networking +- Ubuntu 22.04 LTS or Windows 10/11 + - We don't recommend using Windows for anything except testing out the system on a local machine. +- CPU: ARM Cortex-A53 (the CPU on Raspberry Pi 3) or better +- At least 8GB of storage +- 2GB of RAM + - PhotonVision isn't very RAM intensive, but you'll need at least 2GB to run the OS and PhotonVision. +- The following IO: + - At least 1 USB or MIPI-CSI port for the camera + - Note that we only support using the Raspberry Pi's MIPI-CSI port, other MIPI-CSI ports from other coprocessors may not work. + - Ethernet port for networking -Coprocessor Recommendations -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +### Coprocessor Recommendations When selecting a coprocessor, it is important to consider various factors, particularly when it comes to AprilTag detection. Opting for a coprocessor with a more powerful CPU can generally result in higher FPS AprilTag detection, leading to more accurate pose estimation. However, it is important to note that there is a point of diminishing returns, where the benefits of a more powerful CPU may not outweigh the additional cost. Below is a list of supported hardware, along with some notes on each. -* Orange Pi 5 ($99) - * This is the recommended coprocessor for most teams. It has a powerful CPU that can handle AprilTag detection at high FPS, and is relatively cheap compared to processors of a similar power. -* Raspberry Pi 4/5 ($55-$80) - * This is the recommended coprocessor for teams on a budget. It has a less powerful CPU than the Orange Pi 5, but is still capable of running PhotonVision at a reasonable FPS. -* Mini PCs (such as Beelink N5095) - * This coprocessor will likely have similar performance to the Orange Pi 5 but has a higher performance ceiling (when using more powerful CPUs). Do note that this would require extra effort to wire to the robot / get set up. More information can be found in the set up guide `here. `_ -* Other coprocessors can be used but may require some extra work / command line usage in order to get it working properly. +- Orange Pi 5 (\$99) + - This is the recommended coprocessor for most teams. It has a powerful CPU that can handle AprilTag detection at high FPS, and is relatively cheap compared to processors of a similar power. +- Raspberry Pi 4/5 (\$55-\$80) + - This is the recommended coprocessor for teams on a budget. It has a less powerful CPU than the Orange Pi 5, but is still capable of running PhotonVision at a reasonable FPS. +- Mini PCs (such as Beelink N5095) + - This coprocessor will likely have similar performance to the Orange Pi 5 but has a higher performance ceiling (when using more powerful CPUs). 
Do note that this would require extra effort to wire to the robot / get set up. More information can be found in the set up guide [here.](https://docs.google.com/document/d/1lOSzG8iNE43cK-PgJDDzbwtf6ASyf4vbW8lQuFswxzw/edit?usp=drivesdk) +- Other coprocessors can be used but may require some extra work / command line usage in order to get it working properly. -Choosing a Camera ------------------ +## Choosing a Camera PhotonVision works with Pi Cameras and most USB Cameras, the recommendations below are known to be working and have been tested. Other cameras such as webcams, virtual cameras, etc. are not officially supported and may not work. It is important to note that fisheye cameras should only be used as a driver camera and not for detecting targets. -PhotonVision relies on `CSCore `_ to detect and process cameras, so camera support is determined based off compatibility with CScore along with native support for the camera within your OS (ex. `V4L compatibility `_ if using a Linux machine like a Raspberry Pi). +PhotonVision relies on [CSCore](https://github.com/wpilibsuite/allwpilib/tree/main/cscore) to detect and process cameras, so camera support is determined based off compatibility with CScore along with native support for the camera within your OS (ex. [V4L compatibility](https://en.wikipedia.org/wiki/Video4Linux) if using a Linux machine like a Raspberry Pi). -.. note:: - Logitech Cameras and integrated laptop cameras will not work with PhotonVision due to oddities with their drivers. We recommend using a different camera. +:::{note} +Logitech Cameras and integrated laptop cameras will not work with PhotonVision due to oddities with their drivers. We recommend using a different camera. +::: -.. note:: - We do not currently support the usage of two of the same camera on the same coprocessor. You can only use two or more cameras if they are of different models or they are from Arducam, which has a `tool that allows for cameras to be renamed `_. +:::{note} +We do not currently support the usage of two of the same camera on the same coprocessor. You can only use two or more cameras if they are of different models or they are from Arducam, which has a [tool that allows for cameras to be renamed](https://docs.arducam.com/UVC-Camera/Serial-Number-Tool-Guide/). +::: + +### Recommended Cameras -Recommended Cameras -^^^^^^^^^^^^^^^^^^^ For colored shape detection, any non-fisheye camera supported by PhotonVision will work. We recommend the Pi Camera V1 or a high fps USB camera. For driver camera, we recommend a USB camera with a fisheye lens, so your driver can see more of the field. For AprilTag detection, we recommend you use a global shutter camera that has ~100 degree diagonal FOV. This will allow you to see more AprilTags in frame, and will allow for more accurate pose estimation. You also want a camera that supports high FPS, as this will allow you to update your pose estimator at a higher frequency. -* Recommendations For AprilTag Detection - * Arducam USB OV9281 - * This is the recommended camera for AprilTag detection as it is a high FPS, global shutter camera USB camera that has a ~70 degree FOV. - * Innomaker OV9281 - * Spinel AR0144 - * Pi Camera Module V1 - * The V1 is strongly preferred over the V2 due to the V2 having undesirable FOV choices +- Recommendations For AprilTag Detection + - Arducam USB OV9281 + - This is the recommended camera for AprilTag detection as it is a high FPS, global shutter camera USB camera that has a ~70 degree FOV. 
+ - Innomaker OV9281 + - Spinel AR0144 + - Pi Camera Module V1 + - The V1 is strongly preferred over the V2 due to the V2 having undesirable FOV choices + +### AprilTags and Motion Blur -AprilTags and Motion Blur -^^^^^^^^^^^^^^^^^^^^^^^^^ When detecting AprilTags, you want to reduce the "motion blur" as much as possible. Motion blur is the visual streaking/smearing on the camera stream as a result of movement of the camera or object of focus. You want to mitigate this as much as possible because your robot is constantly moving and you want to be able to read as many tags as you possibly can. The possible solutions to this include: 1. Cranking your exposure as low as it goes and increasing your gain/brightness. This will decrease the effects of motion blur and increase FPS. 2. Using a global shutter (as opposed to rolling shutter) camera. This should eliminate most, if not all motion blur. 3. Only rely on tags when not moving. -.. image:: images/motionblur.gif - :align: center - -Using Multiple Cameras -^^^^^^^^^^^^^^^^^^^^^^ +```{image} images/motionblur.gif +:align: center +``` -Using multiple cameras on your robot will help you detect more AprilTags at once and improve your pose estimation as a result. In order to use multiple cameras, you will need to create multiple PhotonPoseEstimators and add all of their measurements to a single drivetrain pose estimator. Please note that the accuracy of your robot to camera transform is especially important when using multiple cameras as any error in the transform will cause your pose estimations to "fight" each other. For more information, see :ref:`the programming reference. `. +### Using Multiple Cameras +Using multiple cameras on your robot will help you detect more AprilTags at once and improve your pose estimation as a result. In order to use multiple cameras, you will need to create multiple PhotonPoseEstimators and add all of their measurements to a single drivetrain pose estimator. Please note that the accuracy of your robot to camera transform is especially important when using multiple cameras as any error in the transform will cause your pose estimations to "fight" each other. For more information, see {ref}`the programming reference. `. -Performance Matrix ------------------- +## Performance Matrix -.. raw:: html +```{raw} html + - + - - - + +``` Please submit performance data to be added to the matrix here: -.. raw:: html - - +```{raw} html + - + - + +``` diff --git a/docs/source/docs/installation/index.rst b/docs/source/docs/installation/index.md similarity index 52% rename from docs/source/docs/installation/index.rst rename to docs/source/docs/installation/index.md index 3039ceb37e..fe42022db1 100644 --- a/docs/source/docs/installation/index.rst +++ b/docs/source/docs/installation/index.md @@ -1,40 +1,38 @@ -Installation & Setup -==================== +# Installation & Setup This page will help you install PhotonVision on your coprocessor, wire it, and properly setup the networking in order to start tracking targets. - -Step 1: Software Install ------------------------- +## Step 1: Software Install This section will walk you through how to install PhotonVision on your coprocessor. Your coprocessor is the device that has the camera and you are using to detect targets (ex. if you are using a Limelight / Raspberry Pi, that is your coprocessor and you should follow those instructions). -.. 
warning:: You only need to install PhotonVision on the coprocessor/device that is being used to detect targets, you do NOT need to install it on the device you use to view the webdashboard. All you need to view the webdashboard is for a device to be on the same network as your vision coprocessor and an internet browser. - -.. toctree:: - :maxdepth: 3 +:::{warning} +You only need to install PhotonVision on the coprocessor/device that is being used to detect targets, you do NOT need to install it on the device you use to view the webdashboard. All you need to view the webdashboard is for a device to be on the same network as your vision coprocessor and an internet browser. +::: - sw_install/index - updating +```{toctree} +:maxdepth: 3 +sw_install/index +updating +``` -Step 2: Wiring --------------- +## Step 2: Wiring This section will walk you through how to wire your coprocessor to get power. -.. toctree:: - :maxdepth: 1 - - wiring +```{toctree} +:maxdepth: 1 +wiring +``` -Step 3: Networking ------------------- +## Step 3: Networking This section will walk you though how to connect your coprocessor to a network. This section is very important (and easy to get wrong), so we recommend you read it thoroughly. -.. toctree:: - :maxdepth: 1 +```{toctree} +:maxdepth: 1 - networking +networking +``` diff --git a/docs/source/docs/installation/networking.rst b/docs/source/docs/installation/networking.md similarity index 54% rename from docs/source/docs/installation/networking.rst rename to docs/source/docs/installation/networking.md index 1d2ec305ba..efe8762c1e 100644 --- a/docs/source/docs/installation/networking.rst +++ b/docs/source/docs/installation/networking.md @@ -1,47 +1,50 @@ -Networking -========== +# Networking -Physical Networking -------------------- -.. note:: When using PhotonVision off robot, you *MUST* plug the coprocessor into a physical router/radio. You can then connect your laptop/device used to view the webdashboard to the same network. Any other networking setup will not work and will not be supported in any capacity. +## Physical Networking + +:::{note} +When using PhotonVision off robot, you *MUST* plug the coprocessor into a physical router/radio. You can then connect your laptop/device used to view the webdashboard to the same network. Any other networking setup will not work and will not be supported in any capacity. +::: After imaging your coprocessor, run an ethernet cable from your coprocessor to a router/radio and power on your coprocessor by plugging it into the wall. Then connect whatever device you're using to view the webdashboard to the same network and navigate to photonvision.local:5800. -PhotonVision *STRONGLY* recommends the usage of a network switch on your robot. This is because the second radio port on the current FRC radios is known to be buggy and cause frequent connection issues that are detrimental during competition. An in-depth guide on how to install a network switch can be found `on FRC 900's website `_. +PhotonVision *STRONGLY* recommends the usage of a network switch on your robot. This is because the second radio port on the current FRC radios is known to be buggy and cause frequent connection issues that are detrimental during competition. An in-depth guide on how to install a network switch can be found [on FRC 900's website](https://team900.org/blog/ZebraSwitch/). +```{image} images/networking-diagram.png +:alt: Correctly set static IP +``` -.. 
image:: images/networking-diagram.png - :alt: Correctly set static IP +## Digital Networking -Digital Networking ------------------- PhotonVision *STRONGLY* recommends the usage of Static IPs as it increases reliability on the field and when using PhotonVision in general. To properly set up your static IP, follow the steps below: -.. warning:: Only use a static IP when connected to the **robot radio**, and never when testing at home, unless you are well versed in networking or have the relevant "know how". +:::{warning} +Only use a static IP when connected to the **robot radio**, and never when testing at home, unless you are well versed in networking or have the relevant "know how". +::: 1. Ensure your robot is on and you are connected to the robot network. -2. Navigate to ``photonvision.local:5800`` (this may be different if you are using a Gloworm / Limelight) in your browser. +2. Navigate to `photonvision.local:5800` (this may be different if you are using a Gloworm / Limelight) in your browser. 3. Open the settings tab on the left pane. 4. Under the Networking section, set your team number. 5. Change your IP to Static. -6. Set your coprocessor's IP address to “10.TE.AM.11”. More information on IP format can be found `here `_. - +6. Set your coprocessor's IP address to “10.TE.AM.11”. More information on IP format can be found [here](https://docs.wpilib.org/en/stable/docs/networking/networking-introduction/ip-configurations.html#on-the-field-static-configuration). 7. Click the “Save” button. -8. Set your roboRIO to the following static IP address: “10.TE.AM.2”. This can be done via the `roboRIO web dashboard `_. +8. Set your roboRIO to the following static IP address: “10.TE.AM.2”. This can be done via the [roboRIO web dashboard](https://docs.wpilib.org/en/stable/docs/software/roborio-info/roborio-web-dashboard.html#roborio-web-dashboard). -Power-cycle your robot and then you will now be access the PhotonVision dashboard at ``10.TE.AM.11:5800``. +Power-cycle your robot and then you will now be access the PhotonVision dashboard at `10.TE.AM.11:5800`. -.. image:: images/static.png - :alt: Correctly set static IP +```{image} images/static.png +:alt: Correctly set static IP +``` -Port Forwarding ---------------- +## Port Forwarding -If you would like to access your Ethernet-connected vision device from a computer when tethered to the USB port on the roboRIO, you can use `WPILib's `_ ``PortForwarder``. +If you would like to access your Ethernet-connected vision device from a computer when tethered to the USB port on the roboRIO, you can use [WPILib's](https://docs.wpilib.org/en/stable/docs/networking/networking-utilities/portforwarding.html) `PortForwarder`. +```{eval-rst} .. tab-set-code:: - .. code-block:: java + .. code-block:: Java PortForwarder.add(5800, "photonvision.local", 5800); @@ -49,9 +52,15 @@ If you would like to access your Ethernet-connected vision device from a compute wpi::PortForwarder::GetInstance().Add(5800, "photonvision.local", 5800); -.. note:: The address in the code above (``photonvision.local``) is the hostname of the coprocessor. This can be different depending on your hardware, and can be checked in the settings tab under "hostname". + .. code-block:: Python + + # Coming Soon! +``` + +:::{note} +The address in the code above (`photonvision.local`) is the hostname of the coprocessor. This can be different depending on your hardware, and can be checked in the settings tab under "hostname". 
+::: -Camera Stream Ports -------------------- +## Camera Stream Ports The camera streams start at they begin at 1181 with two ports for each camera (ex. 1181 and 1182 for camera one, 1183 and 1184 for camera two, etc.). The easiest way to identify the port of the camera that you want is by double clicking on the stream, which opens it in a separate page. The port will be listed below the stream. diff --git a/docs/source/docs/installation/sw_install/advanced-cmd.md b/docs/source/docs/installation/sw_install/advanced-cmd.md new file mode 100644 index 0000000000..ba974218b4 --- /dev/null +++ b/docs/source/docs/installation/sw_install/advanced-cmd.md @@ -0,0 +1,56 @@ +# Advanced Command Line Usage + +PhotonVision exposes some command line options which may be useful for customizing execution on Debian-based installations. + +## Running a JAR File + +Assuming `java` has been installed, and the appropriate environment variables have been set upon installation (a package manager like `apt` should automatically set these), you can use `java -jar` to run a JAR file. If you downloaded the latest stable JAR of PhotonVision from the [GitHub releases page](https://github.com/PhotonVision/photonvision/releases), you can run the following to start the program: + +```bash +java -jar /path/to/photonvision/photonvision.jar +``` + +## Updating a JAR File + +When you need to update your JAR file, run the following: + +```bash +wget https://git.io/JqkQ9 -O update.sh +sudo chmod +x update.sh +sudo ./update.sh +sudo reboot now +``` + +## Creating a `systemd` Service + +You can also create a systemd service that will automatically run on startup. To do so, first navigate to `/lib/systemd/system`. Create a file called `photonvision.service` (or name it whatever you want) using `touch photonvision.service`. Then open this file in the editor of your choice and paste the following text: + +``` +[Unit] +Description=Service that runs PhotonVision + +[Service] +WorkingDirectory=/path/to/photonvision +# Optional: run photonvision at "nice" -10, which is higher priority than standard +# Nice=-10 +ExecStart=/usr/bin/java -jar /path/to/photonvision/photonvision.jar + +[Install] +WantedBy=multi-user.target +``` + +Then copy the `.service` file to `/etc/systemd/system/` using `cp photonvision.service /etc/systemd/system/photonvision.service`. Then modify the file to have `644` permissions using `chmod 644 /etc/systemd/system/photonvision.service`. + +:::{note} +Many ARM processors have a big.LITTLE architecture where some of the CPU cores are more powerful than others. On this type of architecture, you may get more consistent performance by limiting which cores PhotonVision can use. To do this, add the parameter `AllowedCPUs` to the systemd service file in the `[Service]` section. + +For instance, for an Orange Pi 5, cores 4 through 7 are the fast ones, and you can target those cores with the line `AllowedCPUs=4-7`. +::: + +## Installing the `systemd` Service + +To install the service, simply run `systemctl enable photonvision.service`. + +:::{note} +It is recommended to reload configurations by running `systemctl daemon-reload`. 
+::: diff --git a/docs/source/docs/installation/sw_install/advanced-cmd.rst b/docs/source/docs/installation/sw_install/advanced-cmd.rst deleted file mode 100644 index a19371cf38..0000000000 --- a/docs/source/docs/installation/sw_install/advanced-cmd.rst +++ /dev/null @@ -1,53 +0,0 @@ -Advanced Command Line Usage -=========================== -PhotonVision exposes some command line options which may be useful for customizing execution on Debian-based installations. - -Running a JAR File ------------------- -Assuming ``java`` has been installed, and the appropriate environment variables have been set upon installation (a package manager like ``apt`` should automatically set these), you can use ``java -jar`` to run a JAR file. If you downloaded the latest stable JAR of PhotonVision from the `GitHub releases page `_, you can run the following to start the program: - -.. code-block:: bash - - java -jar /path/to/photonvision/photonvision.jar - -Updating a JAR File -------------------- -When you need to update your JAR file, run the following: - -.. code-block:: bash - - wget https://git.io/JqkQ9 -O update.sh - sudo chmod +x update.sh - sudo ./update.sh - sudo reboot now - -Creating a ``systemd`` Service ------------------------------- -You can also create a systemd service that will automatically run on startup. To do so, first navigate to ``/lib/systemd/system``. Create a file called ``photonvision.service`` (or name it whatever you want) using ``touch photonvision.service``. Then open this file in the editor of your choice and paste the following text: - -.. code-block:: - - [Unit] - Description=Service that runs PhotonVision - - [Service] - WorkingDirectory=/path/to/photonvision - # Optional: run photonvision at "nice" -10, which is higher priority than standard - # Nice=-10 - ExecStart=/usr/bin/java -jar /path/to/photonvision/photonvision.jar - - [Install] - WantedBy=multi-user.target - -Then copy the ``.service`` file to ``/etc/systemd/system/`` using ``cp photonvision.service /etc/systemd/system/photonvision.service``. Then modify the file to have ``644`` permissions using ``chmod 644 /etc/systemd/system/photonvision.service``. - -.. note:: - Many ARM processors have a big.LITTLE architecture where some of the CPU cores are more powerful than others. On this type of architecture, you may get more consistent performance by limiting which cores PhotonVision can use. To do this, add the parameter ``AllowedCPUs`` to the systemd service file in the ``[Service]`` section. - - For instance, for an Orange Pi 5, cores 4 through 7 are the fast ones, and you can target those cores with the line ``AllowedCPUs=4-7``. - -Installing the ``systemd`` Service ----------------------------------- -To install the service, simply run ``systemctl enable photonvision.service``. - -.. note:: It is recommended to reload configurations by running ``systemctl daemon-reload``. diff --git a/docs/source/docs/installation/sw_install/gloworm.md b/docs/source/docs/installation/sw_install/gloworm.md new file mode 100644 index 0000000000..2a52497660 --- /dev/null +++ b/docs/source/docs/installation/sw_install/gloworm.md @@ -0,0 +1,60 @@ +# Gloworm Installation + +While not currently in production, PhotonVision still supports Gloworm vision processing cameras. + +## Downloading the Gloworm Image + +Download the latest [Gloworm/Limelight release of PhotonVision](https://github.com/photonvision/photonvision/releases); the image will be suffixed with "image_limelight2.xz". You do not need to extract the downloaded archive. 
+ +## Flashing the Gloworm Image + +Plug a USB C cable from your computer into the USB C port on Gloworm labeled with a download icon. + +Use the 1.18.11 version of [Balena Etcher](https://github.com/balena-io/etcher/releases/tag/v1.18.11) to flash an image onto the coprocessor. + +Run BalenaEtcher as an administrator. Select the downloaded `.zip` file. + +Select the compute module. If it doesn't show up after 30s try using another USB port, initialization may take a while. If prompted, install the recommended missing drivers. + +Hit flash. Wait for flashing to complete, then disconnect your USB C cable. + +:::{warning} +Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Gloworm. Updating to 1.18.11 will fix this issue. +::: + +## Final Steps + +Power your device per its documentation and connect it to a robot network. + +You should be able to locate the camera at `http://photonvision.local:5800/` in your browser on your computer when connected to the robot. + +## Troubleshooting/Setting a Static IP + +A static IP address may be used as an alternative to the mDNS `photonvision.local` address. + +Download and run [Angry IP Scanner](https://angryip.org/download/#windows) to find PhotonVision/your coprocessor on your network. + +```{image} images/angryIP.png +``` + +Once you find it, set the IP to a desired {ref}`static IP in PhotonVision. ` + +## Updating PhotonVision + +Download the latest stable .jar from [the releases page](https://github.com/PhotonVision/photonvision/releases), go to the settings tab, and upload the .jar using the Offline Update button. + +:::{note} +If you are updating PhotonVision on a Gloworm/Limelight, download the LinuxArm64 .jar file. +::: + +As an alternative option - Export your settings, reimage your coprocessor using the instructions above, and import your settings back in. + +## Hardware Troubleshooting + +To turn the LED lights off or on you need to modify the `ledMode` network tables entry or the `camera.setLED` of PhotonLib. + +## Support Links + +- [Website/Documentation](https://photonvision.github.io/gloworm-docs/docs/quickstart/#finding-gloworm) (Note: Gloworm is no longer in production) +- [Image](https://github.com/gloworm-vision/pi-img-updator/releases) +- [Discord](https://discord.com/invite/DncQRky) diff --git a/docs/source/docs/installation/sw_install/gloworm.rst b/docs/source/docs/installation/sw_install/gloworm.rst deleted file mode 100644 index 2dfc3454ab..0000000000 --- a/docs/source/docs/installation/sw_install/gloworm.rst +++ /dev/null @@ -1,59 +0,0 @@ -Gloworm Installation -==================== -While not currently in production, PhotonVision still supports Gloworm vision processing cameras. - -Downloading the Gloworm Image ------------------------------ -Download the latest `Gloworm/Limelight release of PhotonVision `_; the image will be suffixed with "image_limelight2.xz". You do not need to extract the downloaded archive. - -Flashing the Gloworm Image --------------------------- -Plug a USB C cable from your computer into the USB C port on Gloworm labeled with a download icon. - -Use the 1.18.11 version of `Balena Etcher `_ to flash an image onto the coprocessor. - -Run BalenaEtcher as an administrator. Select the downloaded ``.zip`` file. - -Select the compute module. If it doesn't show up after 30s try using another USB port, initialization may take a while. If prompted, install the recommended missing drivers. - -Hit flash. 
Wait for flashing to complete, then disconnect your USB C cable. - -.. warning:: Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Gloworm. Updating to 1.18.11 will fix this issue. - -Final Steps ------------ -Power your device per its documentation and connect it to a robot network. - -You should be able to locate the camera at ``http://photonvision.local:5800/`` in your browser on your computer when connected to the robot. - -Troubleshooting/Setting a Static IP ------------------------------------ -A static IP address may be used as an alternative to the mDNS ``photonvision.local`` address. - -Download and run `Angry IP Scanner `_ to find PhotonVision/your coprocessor on your network. - -.. image:: images/angryIP.png - -Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. ` - -Updating PhotonVision ---------------------- -Download the latest stable .jar from `the releases page `_, go to the settings tab, and upload the .jar using the Offline Update button. - -.. note:: If you are updating PhotonVision on a Gloworm/Limelight, download the LinuxArm64 .jar file. - -As an alternative option - Export your settings, reimage your coprocessor using the instructions above, and import your settings back in. - -Hardware Troubleshooting ------------------------- -To turn the LED lights off or on you need to modify the ``ledMode`` network tables entry or the ``camera.setLED`` of PhotonLib. - - -Support Links -------------- - -* `Website/Documentation `__ (Note: Gloworm is no longer in production) - -* `Image `__ - -* `Discord `__ diff --git a/docs/source/docs/installation/sw_install/index.md b/docs/source/docs/installation/sw_install/index.md new file mode 100644 index 0000000000..86ad5d88b3 --- /dev/null +++ b/docs/source/docs/installation/sw_install/index.md @@ -0,0 +1,33 @@ +# Software Installation + +## Supported Coprocessors + +```{toctree} +:maxdepth: 1 + +raspberry-pi +limelight +orange-pi +snakeyes +``` + +## Desktop Environments + +```{toctree} +:maxdepth: 1 + +windows-pc +linux-pc +mac-os +``` + +## Other + +```{toctree} +:maxdepth: 1 + +other-coprocessors +advanced-cmd +romi +gloworm +``` diff --git a/docs/source/docs/installation/sw_install/index.rst b/docs/source/docs/installation/sw_install/index.rst deleted file mode 100644 index b9503adc41..0000000000 --- a/docs/source/docs/installation/sw_install/index.rst +++ /dev/null @@ -1,34 +0,0 @@ -Software Installation -===================== - -Supported Coprocessors ----------------------- - -.. toctree:: - :maxdepth: 1 - - raspberry-pi - limelight - orange-pi - snakeyes - -Desktop Environments ----------------------- - -.. toctree:: - :maxdepth: 1 - - windows-pc - linux-pc - mac-os - -Other ------ - -.. toctree:: - :maxdepth: 1 - - other-coprocessors - advanced-cmd - romi - gloworm diff --git a/docs/source/docs/installation/sw_install/limelight.md b/docs/source/docs/installation/sw_install/limelight.md new file mode 100644 index 0000000000..e819fd6a48 --- /dev/null +++ b/docs/source/docs/installation/sw_install/limelight.md @@ -0,0 +1,24 @@ +# Limelight Installation + +## Imaging + +Limelight imaging is a very similar process to Gloworm, but with extra steps. + +### Base Install Steps + +Due to the similarities in hardware, follow the {ref}`Gloworm install instructions `. + +## Hardware-Specific Steps + +Download the hardwareConfig.json file for the version of your Limelight: + +- {download}`Limelight Version 2 `. 
+- {download}`Limelight Version 2+ `. + +:::{note} +No hardware config is provided for the Limelight 3 as AprilTags do not require the LEDs (meaning nobody has reverse-engineered what I/O pins drive the LEDs) and the camera FOV is determined as part of calibration. +::: + +{ref}`Import the hardwareConfig.json file `. Again, this is **REQUIRED** or target measurements will be incorrect, and LEDs will not work. + +After installation you should be able to [locate the camera](https://photonvision.github.io/gloworm-docs/docs/quickstart/#finding-gloworm) at: `http://photonvision.local:5800/` (not `gloworm.local`, as previously) diff --git a/docs/source/docs/installation/sw_install/limelight.rst b/docs/source/docs/installation/sw_install/limelight.rst deleted file mode 100644 index 686dc1044b..0000000000 --- a/docs/source/docs/installation/sw_install/limelight.rst +++ /dev/null @@ -1,25 +0,0 @@ -Limelight Installation -====================== - -Imaging ------- -Limelight imaging is a very similar process to Gloworm, but with extra steps. - - -Base Install Steps ^^^^^^^^^^^^^^^^^^ -Due to the similarities in hardware, follow the :ref:`Gloworm install instructions `. - - -Hardware-Specific Steps ----------------------- -Download the hardwareConfig.json file for the version of your Limelight: - -- :download:`Limelight Version 2 `. -- :download:`Limelight Version 2+ `. - -.. note:: No hardware config is provided for the Limelight 3 as AprilTags do not require the LEDs (meaning nobody has reverse-engineered what I/O pins drive the LEDs) and the camera FOV is determined as part of calibration. - -:ref:`Import the hardwareConfig.json file `. Again, this is **REQUIRED** or target measurements will be incorrect, and LEDs will not work. - -After installation you should be able to `locate the camera `_ at: ``http://photonvision.local:5800/`` (not ``gloworm.local``, as previously) diff --git a/docs/source/docs/installation/sw_install/linux-pc.md b/docs/source/docs/installation/sw_install/linux-pc.md new file mode 100644 index 0000000000..3c6e63e4a7 --- /dev/null +++ b/docs/source/docs/installation/sw_install/linux-pc.md @@ -0,0 +1,47 @@ +# Linux PC Installation + +PhotonVision may be run on a Debian-based Linux Desktop PC for basic testing and evaluation. + +:::{note} +You do not need to install PhotonVision on your Linux PC in order to access the webdashboard (assuming you are using an external coprocessor like a Raspberry Pi). +::: + +## Installing Java + +PhotonVision requires a JDK installed and on the system path. JDK 11 is needed (different versions will not work). If you don't have JDK 11 already, run the following to install it: + +``` +$ sudo apt-get install openjdk-11-jdk +``` + +:::{warning} +Using a JDK other than JDK11 will cause issues when running PhotonVision and is not supported. +::: + +## Downloading the Latest Stable Release of PhotonVision + +Go to the [GitHub releases page](https://github.com/PhotonVision/photonvision/releases) and download the relevant .jar file for your coprocessor. + +:::{note} +If your coprocessor has a 64 bit ARM based CPU architecture (OrangePi, Raspberry Pi, etc.), download the LinuxArm64.jar file. + +If your coprocessor has a 64 bit x86 based CPU architecture (Mini PC, laptop, etc.), download the Linuxx64.jar file. +::: + +:::{warning} +Be careful to pick the latest stable release. "Draft" or "Pre-Release" versions are not stable and often have bugs. 
+::: + +## Running PhotonVision + +To run PhotonVision, open a terminal window of your choice and run the following command: + +``` +$ java -jar /path/to/photonvision/photonvision-xxx.jar +``` + +If your computer has a compatible webcam connected, PhotonVision should startup without any error messages. If there are error messages, your webcam isn't supported or another issue has occurred. If it is the latter, please open an issue on the [PhotonVision issues page](https://github.com/PhotonVision/photonvision/issues). + +## Accessing the PhotonVision Interface + +Once the Java backend is up and running, you can access the main vision interface by navigating to `localhost:5800` inside your browser. diff --git a/docs/source/docs/installation/sw_install/linux-pc.rst b/docs/source/docs/installation/sw_install/linux-pc.rst deleted file mode 100644 index 2899787547..0000000000 --- a/docs/source/docs/installation/sw_install/linux-pc.rst +++ /dev/null @@ -1,41 +0,0 @@ -Linux PC Installation -===================== -PhotonVision may be run on a Debian-based Linux Desktop PC for basic testing and evaluation. - -.. note:: You do not need to install PhotonVision on a Windows PC in order to access the webdashboard (assuming you are using an external coprocessor like a Raspberry Pi). - -Installing Java ---------------- -PhotonVision requires a JDK installed and on the system path. JDK 11 is needed (different versions will not work). If you don't have JDK 11 already, run the following to install it: - -.. code-block:: - - $ sudo apt-get install openjdk-11-jdk - -.. warning:: Using a JDK other than JDK11 will cause issues when running PhotonVision and is not supported. - -Downloading the Latest Stable Release of PhotonVision ------------------------------------------------------ -Go to the `GitHub releases page `_ and download the relevant .jar file for your coprocessor. - -.. note:: - If your coprocessor has a 64 bit ARM based CPU architecture (OrangePi, Raspberry Pi, etc.), download the LinuxArm64.jar file. - - If your coprocessor has an 64 bit x86 based CPU architecture (Mini PC, laptop, etc.), download the Linuxx64.jar file. - - -.. warning:: Be careful to pick the latest stable release. "Draft" or "Pre-Release" versions are not stable and often have bugs. - -Running PhotonVision --------------------- -To run PhotonVision, open a terminal window of your choice and run the following command: - -.. code-block:: - - $ java -jar /path/to/photonvision/photonvision-xxx.jar - -If your computer has a compatible webcam connected, PhotonVision should startup without any error messages. If there are error messages, your webcam isn't supported or another issue has occurred. If it is the latter, please open an issue on the `PhotonVision issues page `_. - -Accessing the PhotonVision Interface ------------------------------------- -Once the Java backend is up and running, you can access the main vision interface by navigating to ``localhost:5800`` inside your browser. diff --git a/docs/source/docs/installation/sw_install/mac-os.md b/docs/source/docs/installation/sw_install/mac-os.md new file mode 100644 index 0000000000..8c2c7b342e --- /dev/null +++ b/docs/source/docs/installation/sw_install/mac-os.md @@ -0,0 +1,53 @@ +# Mac OS Installation + +:::{warning} +Due to current [cscore](https://github.com/wpilibsuite/allwpilib/tree/main/cscore) restrictions, the PhotonVision server backend may have issues running macOS. 
+::: + +:::{note} +You do not need to install PhotonVision on your Mac in order to access the webdashboard (assuming you are using an external coprocessor like a Raspberry Pi). +::: + +VERY limited macOS support is available. + +## Installing Java + +PhotonVision requires a JDK installed and on the system path. JDK 11 is needed (different versions will not work). You may already have this if you have installed WPILib. If not, [download and install it from here](https://adoptium.net/temurin/releases?version=11). + +:::{warning} +Using a JDK other than JDK11 will cause issues when running PhotonVision and is not supported. +::: + +## Downloading the Latest Stable Release of PhotonVision + +Go to the [GitHub releases page](https://github.com/PhotonVision/photonvision/releases) and download the relevant .jar file for your coprocessor. + +:::{note} +If you have an M1/M2 Mac, download the macarm64.jar file. + +If you have an Intel based Mac, download the macx64.jar file. +::: + +:::{warning} +Be careful to pick the latest stable release. "Draft" or "Pre-Release" versions are not stable and often have bugs. +::: + +## Running PhotonVision + +To run PhotonVision, open a terminal window of your choice and run the following command: + +``` +$ java -jar /path/to/photonvision/photonvision-xxx.jar +``` + +:::{warning} +Due to current [cscore](https://github.com/wpilibsuite/allwpilib/tree/main/cscore) restrictions, running PhotonVision in test mode is all that is currently known to work. +::: + +## Accessing the PhotonVision Interface + +Once the Java backend is up and running, you can access the main vision interface by navigating to `localhost:5800` inside your browser. + +:::{warning} +Due to current [cscore](https://github.com/wpilibsuite/allwpilib/tree/main/cscore) restrictions, it is unlikely any streams will open from real webcams. +::: diff --git a/docs/source/docs/installation/sw_install/mac-os.rst b/docs/source/docs/installation/sw_install/mac-os.rst deleted file mode 100644 index cdb19e5b04..0000000000 --- a/docs/source/docs/installation/sw_install/mac-os.rst +++ /dev/null @@ -1,41 +0,0 @@ -Mac OS Installation -=================== - -.. warning:: Due to current `cscore `_ restrictions, the PhotonVision server backend may have issues running macOS. - -.. note:: You do not need to install PhotonVision on a Windows PC in order to access the webdashboard (assuming you are using an external coprocessor like a Raspberry Pi). - -VERY Limited macOS support is available. - -Installing Java --------------- -PhotonVision requires a JDK installed and on the system path. JDK 11 is needed (different versions will not work). You may already have this if you have installed WPILib. If not, `download and install it from here `_. - -.. warning:: Using a JDK other than JDK11 will cause issues when running PhotonVision and is not supported. - -Downloading the Latest Stable Release of PhotonVision ----------------------------------------------------- -Go to the `GitHub releases page `_ and download the relevant .jar file for your coprocessor. - -.. note:: - If you have an M1/M2 Mac, download the macarm64.jar file. - - If you have an Intel based Mac, download the macx64.jar file. - -.. warning:: Be careful to pick the latest stable release. "Draft" or "Pre-Release" versions are not stable and often have bugs. - -Running PhotonVision -------------------- -To run PhotonVision, open a terminal window of your choice and run the following command: - -.. 
code-block:: - - $ java -jar /path/to/photonvision/photonvision-xxx.jar - -.. warning:: Due to current `cscore `_ restrictions, the PhotonVision using test mode is all that is known to work currently. - -Accessing the PhotonVision Interface ------------------------------------- -Once the Java backend is up and running, you can access the main vision interface by navigating to ``localhost:5800`` inside your browser. - -.. warning:: Due to current `cscore `_ restrictions, it is unlikely any streams will open from real webcams. diff --git a/docs/source/docs/installation/sw_install/orange-pi.md b/docs/source/docs/installation/sw_install/orange-pi.md new file mode 100644 index 0000000000..b03c7ffabe --- /dev/null +++ b/docs/source/docs/installation/sw_install/orange-pi.md @@ -0,0 +1,39 @@ +# Orange Pi Installation + +## Downloading Linux Image + +Starting in 2024, PhotonVision provides pre-configured system images for Orange Pi 5 devices. Download the latest release of the PhotonVision Orange Pi 5 image (.xz file suffixed with `orangepi5.xz`) from the [releases page](https://github.com/PhotonVision/photonvision/releases). You do not need to extract the downloaded archive file. This image is configured with a `pi` user with password `raspberry`. + +For an Orange Pi 4, download the latest release of the Armbian Bullseye CLI image from [here](https://armbian.tnahosting.net/archive/orangepi4/archive/Armbian_23.02.2_Orangepi4_bullseye_current_5.15.93.img.xz). + +## Flashing the Pi Image + +An 8GB or larger SD card is recommended. + +Use the 1.18.11 version of [Balena Etcher](https://github.com/balena-io/etcher/releases/tag/v1.18.11) to flash an image onto a Orange Pi. Select the downloaded image file, select your microSD card, and flash. + +For more detailed instructions on using Etcher, please see the [Etcher website](https://www.balena.io/etcher/). + +:::{warning} +Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Orange Pi. Updating to 1.18.11 will fix this issue. +::: + +Alternatively, you can use the [Raspberry Pi Imager](https://www.raspberrypi.com/software/) to flash the image. + +Select "Choose OS" and then "Use custom" to select the downloaded image file. Select your microSD card and flash. + +:::{note} +If you are working on Linux, "dd" can be used in the command line to flash an image. +::: + +If you're using an Orange Pi 5, that's it! Orange Pi 4 users will need to install PhotonVision (see below). + +### Initial User Setup (Orange Pi 4 Only) + +Insert the flashed microSD card into your Orange Pi and boot it up. The first boot may take a few minutes as the Pi expands the filesystem. Be sure not to unplug during this process. + +Plug your Orange Pi into a display via HDMI and plug in a keyboard via USB once its powered up. For an Orange Pi 4, complete the initial set up which involves creating a root password and adding a user, as well as setting localization language. Additionally, choose “bash” when prompted. + +## Installing PhotonVision (Orange Pi 4 Only) + +From here, you can follow {ref}`this guide `. 
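+For reference, that guide boils down to fetching and running the PhotonVision install script on the Pi over an internet connection. A rough sketch of those steps is below; the Debian-based coprocessor installation page remains the authoritative source for these commands:
+
+```bash
+# Run these on the Orange Pi 4 itself (internet access required).
+wget https://git.io/JJrEP -O install.sh
+sudo chmod +x install.sh
+sudo ./install.sh
+sudo reboot now
+```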
diff --git a/docs/source/docs/installation/sw_install/orange-pi.rst b/docs/source/docs/installation/sw_install/orange-pi.rst deleted file mode 100644 index c99805c1f7..0000000000 --- a/docs/source/docs/installation/sw_install/orange-pi.rst +++ /dev/null @@ -1,37 +0,0 @@ -Orange Pi Installation -====================== - -Downloading Linux Image ------------------------ - -Starting in 2024, PhotonVision provides pre-configured system images for Orange Pi 5 devices. Download the latest release of the PhotonVision Orange Pi 5 image (.xz file suffixed with ``orangepi5.xz``) from the `releases page `_. You do not need to extract the downloaded archive file. This image is configured with a ``pi`` user with password ``raspberry``. - -For an Orange Pi 4, download the latest release of the Armbian Bullseye CLI image from `here `_. - -Flashing the Pi Image ---------------------- -An 8GB or larger SD card is recommended. - -Use the 1.18.11 version of `Balena Etcher `_ to flash an image onto a Orange Pi. Select the downloaded image file, select your microSD card, and flash. - -For more detailed instructions on using Etcher, please see the `Etcher website `_. - -.. warning:: Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Orange Pi. Updating to 1.18.11 will fix this issue. - -Alternatively, you can use the `Raspberry Pi Imager `_ to flash the image. - -Select "Choose OS" and then "Use custom" to select the downloaded image file. Select your microSD card and flash. - -.. note:: If you are working on Linux, "dd" can be used in the command line to flash an image. - -If you're using an Orange Pi 5, that's it! Orange Pi 4 users will need to install PhotonVision (see below). - -Initial User Setup (Orange Pi 4 Only) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Insert the flashed microSD card into your Orange Pi and boot it up. The first boot may take a few minutes as the Pi expands the filesystem. Be sure not to unplug during this process. - -Plug your Orange Pi into a display via HDMI and plug in a keyboard via USB once its powered up. For an Orange Pi 4, complete the initial set up which involves creating a root password and adding a user, as well as setting localization language. Additionally, choose “bash” when prompted. - -Installing PhotonVision (Orange Pi 4 Only) ------------------------------------------- -From here, you can follow :ref:`this guide `. diff --git a/docs/source/docs/installation/sw_install/other-coprocessors.md b/docs/source/docs/installation/sw_install/other-coprocessors.md new file mode 100644 index 0000000000..c2e0c7ff22 --- /dev/null +++ b/docs/source/docs/installation/sw_install/other-coprocessors.md @@ -0,0 +1,39 @@ +# Other Debian-Based Co-Processor Installation + +:::{warning} +Working with unsupported coprocessors requires some level of "know how" of your system. The install script has only been tested on Debian/Raspberry Pi OS Buster and Ubuntu Bionic. If any issues arise with your specific OS, please open an issue on our [issues page](https://github.com/PhotonVision/photonvision/issues). +::: + +:::{note} +We'd love to have your input! If you get PhotonVision working on another coprocessor, consider documenting your steps and submitting a [docs issue](https://github.com/PhotonVision/photonvision-docs/issues)., [pull request](https://github.com/PhotonVision/photonvision-docs/pulls) , or [ping us on Discord](https://discord.com/invite/wYxTwym). 
For example, Limelight and Romi install instructions came about because someone spent the time to figure it out, and did a writeup. +::: + +## Installing PhotonVision + +We provide an [install script](https://git.io/JJrEP) for other Debian-based systems (with `apt`) that will automatically install PhotonVision and make sure that it runs on startup. + +```bash +$ wget https://git.io/JJrEP -O install.sh +$ sudo chmod +x install.sh +$ sudo ./install.sh +$ sudo reboot now +``` + +:::{note} +Your co-processor will require an Internet connection for this process to work correctly. +::: + +For installation on any other co-processors, we recommend reading the {ref}`advanced command line documentation `. + +## Updating PhotonVision + +PhotonVision can be updated by downloading the latest jar file, copying it onto the processor, and restarting the service. + +For example, from another computer, run the following commands. Substitute the correct username for "\[user\]" (e.g. Raspberry Pi uses "pi", Orange Pi uses "orangepi".) + +```bash +$ scp [jar name].jar [user]@photonvision.local:~/ +$ ssh [user]@photonvision.local +$ sudo mv [jar name].jar /opt/photonvision/photonvision.jar +$ sudo systemctl restart photonvision.service +``` diff --git a/docs/source/docs/installation/sw_install/other-coprocessors.rst b/docs/source/docs/installation/sw_install/other-coprocessors.rst deleted file mode 100644 index 27125343a4..0000000000 --- a/docs/source/docs/installation/sw_install/other-coprocessors.rst +++ /dev/null @@ -1,36 +0,0 @@ -Other Debian-Based Co-Processor Installation -============================================ - -.. warning:: Working with unsupported coprocessors requires some level of "know how" of your target system. The install script has only been tested on Debian/Raspberry Pi OS Buster and Ubuntu Bionic. If any issues arise with your specific OS, please open an issue on our `issues page `_. - -.. note:: We'd love to have your input! If you get PhotonVision working on another coprocessor, consider documenting your steps and submitting a `docs issue `_., `pull request `_ , or `ping us on Discord `_. For example, Limelight and Romi install instructions came about because someone spent the time to figure it out, and did a writeup. - -Installing PhotonVision ------------------------ - -We provide an `install script `_ for other Debian-based systems (with ``apt``) that will automatically install PhotonVision and make sure that it runs on startup. - -.. code-block:: bash - - $ wget https://git.io/JJrEP -O install.sh - $ sudo chmod +x install.sh - $ sudo ./install.sh - $ sudo reboot now - -.. note:: Your co-processor will require an Internet connection for this process to work correctly. - -For installation on any other co-processors, we recommend reading the :ref:`advanced command line documentation `. - -Updating PhotonVision ---------------------- - -PhotonVision can be updated by downloading the latest jar file, copying it onto the processor, and restarting the service. - -For example, from another computer, run the following commands. Substitute the correct username for "[user]" (e.g. Raspberry Pi uses "pi", Orange Pi uses "orangepi".) - -.. 
code-block:: bash - - $ scp [jar name].jar [user]@photonvision.local:~/ - $ ssh [user]@photonvision.local - $ sudo mv [jar name].jar /opt/photonvision/photonvision.jar - $ sudo systemctl restart photonvision.service diff --git a/docs/source/docs/installation/sw_install/raspberry-pi.md b/docs/source/docs/installation/sw_install/raspberry-pi.md new file mode 100644 index 0000000000..d8e851d51e --- /dev/null +++ b/docs/source/docs/installation/sw_install/raspberry-pi.md @@ -0,0 +1,50 @@ +# Raspberry Pi Installation + +A Pre-Built Raspberry Pi image is available for ease of installation. + +## Downloading the Pi Image + +Download the latest release of the PhotonVision Raspberry Pi image (.xz file) from the [releases page](https://github.com/PhotonVision/photonvision/releases). You do not need to extract the downloaded archive. + +:::{note} +Make sure you download the image that ends in '-RaspberryPi.xz'. +::: + +## Flashing the Pi Image + +An 8GB or larger card is recommended. + +Use the 1.18.11 version of [Balena Etcher](https://github.com/balena-io/etcher/releases/tag/v1.18.11) to flash an image onto a Raspberry Pi. Select the downloaded `.xz` file, select your microSD card, and flash. + +For more detailed instructions on using Etcher, please see the [Etcher website](https://www.balena.io/etcher/). + +:::{warning} +Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Raspberry Pi. Updating to 1.18.11 will fix this issue. +::: + +Alternatively, you can use the [Raspberry Pi Imager](https://www.raspberrypi.com/software/) to flash the image. + +Select "Choose OS" and then "Use custom" to select the downloaded image file. Select your microSD card and flash. + +If you are using a non-standard Pi Camera connected to the CSI port, {ref}`additional configuration may be required. ` + +## Final Steps + +Insert the flashed microSD card into your Raspberry Pi and boot it up. The first boot may take a few minutes as the Pi expands the filesystem. Be sure not to unplug during this process. + +After the initial setup process, your Raspberry Pi should be configured for PhotonVision. You can verify this by making sure your Raspberry Pi and computer are connected to the same network and navigating to `http://photonvision.local:5800` in your browser on your computer. + +## Troubleshooting/Setting a Static IP + +A static IP address may be used as an alternative to the mDNS `photonvision.local` address. + +Download and run [Angry IP Scanner](https://angryip.org/download/#windows) to find PhotonVision/your coprocessor on your network. + +```{image} images/angryIP.png +``` + +Once you find it, set the IP to a desired {ref}`static IP in PhotonVision. ` + +## Updating PhotonVision + +To upgrade a Raspberry Pi device with PhotonVision already installed, follow the {ref}`Raspberry Pi update instructions`. diff --git a/docs/source/docs/installation/sw_install/raspberry-pi.rst b/docs/source/docs/installation/sw_install/raspberry-pi.rst deleted file mode 100644 index 7dd617400c..0000000000 --- a/docs/source/docs/installation/sw_install/raspberry-pi.rst +++ /dev/null @@ -1,46 +0,0 @@ -Raspberry Pi Installation -========================= -A Pre-Built Raspberry Pi image is available for ease of installation. - -Downloading the Pi Image ------------------------- -Download the latest release of the PhotonVision Raspberry image (.xz file) from the `releases page `_. You do not need to extract the downloaded ZIP file. - -.. 
note:: Make sure you download the image that ends in '-RasberryPi.xz'. - -Flashing the Pi Image ---------------------- -An 8GB or larger card is recommended. - -Use the 1.18.11 version of `Balena Etcher `_ to flash an image onto a Raspberry Pi. Select the downloaded ``.tar.xz`` file, select your microSD card, and flash. - -For more detailed instructions on using Etcher, please see the `Etcher website `_. - -.. warning:: Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Raspberry Pi. Updating to 1.18.11 will fix this issue. - -Alternatively, you can use the `Raspberry Pi Imager `_ to flash the image. - -Select "Choose OS" and then "Use custom" to select the downloaded image file. Select your microSD card and flash. - -If you are using a non-standard Pi Camera connected to the CSI port, :ref:`additional configuration may be required. ` - -Final Steps ------------ -Insert the flashed microSD card into your Raspberry Pi and boot it up. The first boot may take a few minutes as the Pi expands the filesystem. Be sure not to unplug during this process. - -After the initial setup process, your Raspberry Pi should be configured for PhotonVision. You can verify this by making sure your Raspberry Pi and computer are connected to the same network and navigating to ``http://photonvision.local:5800`` in your browser on your computer. - -Troubleshooting/Setting a Static IP ------------------------------------ -A static IP address may be used as an alternative to the mDNS ``photonvision.local`` address. - -Download and run `Angry IP Scanner `_ to find PhotonVision/your coprocessor on your network. - -.. image:: images/angryIP.png - -Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. ` - -Updating PhotonVision ---------------------- - -To upgrade a Raspberry Pi device with PhotonVision already installed, follow the :ref:`Raspberry Pi update instructions`. diff --git a/docs/source/docs/installation/sw_install/romi.md b/docs/source/docs/installation/sw_install/romi.md new file mode 100644 index 0000000000..a6b327157f --- /dev/null +++ b/docs/source/docs/installation/sw_install/romi.md @@ -0,0 +1,22 @@ +# Romi Installation + +The [Romi](https://docs.wpilib.org/en/latest/docs/romi-robot/index.html) is a small robot that can be controlled with the WPILib software. The main controller is a Raspberry Pi that must be imaged with [WPILibPi](https://docs.wpilib.org/en/latest/docs/romi-robot/imaging-romi.html) . + +## Installation + +The WPILibPi image includes FRCVision, which reserves USB cameras; to use PhotonVision, we need to edit the `/home/pi/runCamera` script to disable it. First we will need to make the file system writeable; the easiest way to do this is to go to `10.0.0.2` and choose "Writable" at the top. + +SSH into the Raspberry Pi (using Windows command line, or a tool like [Putty](https://www.chiark.greenend.org.uk/~sgtatham/putty/) ) at the Romi's default address `10.0.0.2`. The default user is `pi`, and the password is `raspberry`. + +Follow the process for installing PhotonVision on {ref}`"Other Debian-Based Co-Processor Installation" `. As it mentions this will require an internet connection so plugging into the ethernet jack on the Raspberry Pi will be the easiest solution. The pi must remain writable! + +Next, from the SSH terminal, run `sudo nano /home/pi/runCamera` then arrow down to the start of the exec line and press "Enter" to add a new line. 
Then add `#` before the exec command to comment it out. Then, arrow up to the new line and type `sleep 10000`. Hit "Ctrl + O" and then "Enter" to save the file. Finally press "Ctrl + X" to exit nano. Now, reboot the Romi by typing `sudo reboot`. + +```{image} images/nano.png +``` + +After it reboots, you should be able to [locate the PhotonVision UI](https://photonvision.github.io/gloworm-docs/docs/quickstart/#finding-gloworm) at: `http://10.0.0.2:5800/`. + +:::{warning} +In order for settings, logs, etc. to be saved / take effect, ensure that PhotonVision is in writable mode. +::: diff --git a/docs/source/docs/installation/sw_install/romi.rst b/docs/source/docs/installation/sw_install/romi.rst deleted file mode 100644 index 55e16c2f39..0000000000 --- a/docs/source/docs/installation/sw_install/romi.rst +++ /dev/null @@ -1,21 +0,0 @@ -Romi Installation -================= - -The `Romi `_ is a small robot that can be controlled with the WPILib software. The main controller is a Raspberry Pi that must be imaged with `WPILibPi `_ . - -Installation ------------- - -The WPILibPi image includes FRCVision, which reserves USB cameras; to use PhotonVision, we need to edit the `/home/pi/runCamera` script to disable it. First we will need to make the file system writeable; the easiest way to do this is to go to ``10.0.0.2`` and choose "Writable" at the top. - -SSH into the Raspberry Pi (using Windows command line, or a tool like `Putty `_ ) at the Romi's default address ``10.0.0.2``. The default user is ``pi``, and the password is ``raspberry``. - -Follow the process for installing PhotonVision on :ref:`"Other Debian-Based Co-Processor Installation" `. As it mentions this will require an internet connection so plugging into the ethernet jack on the Raspberry Pi will be the easiest solution. The pi must remain writable! - -Next, from the SSH terminal, run ``sudo nano /home/pi/runCamera`` then arrow down to the start of the exec line and press "Enter" to add a new line. Then add ``#`` before the exec command to comment it out. Then, arrow up to the new line and type ``sleep 10000``. Hit "Ctrl + O" and then "Enter" to save the file. Finally press "Ctrl + X" to exit nano. Now, reboot the Romi by typing ``sudo reboot``. - -.. image:: images/nano.png - -After it reboots, you should be able to `locate the PhotonVision UI `_ at: ``http://10.0.0.2:5800/``. - -.. warning:: In order for settings, logs, etc. to be saved / take effect, ensure that PhotonVision is in writable mode. diff --git a/docs/source/docs/installation/sw_install/snakeyes.md b/docs/source/docs/installation/sw_install/snakeyes.md new file mode 100644 index 0000000000..e099fde408 --- /dev/null +++ b/docs/source/docs/installation/sw_install/snakeyes.md @@ -0,0 +1,56 @@ +# SnakeEyes Installation + +A Pre-Built Raspberry Pi image with configuration for [the SnakeEyes Raspberry Pi Hat](https://www.playingwithfusion.com/productview.php?pdid=133&catid=1014) is available for ease of setup. + +## Downloading the SnakeEyes Image + +Download the latest release of the SnakeEyes-specific PhotonVision Pi image from the [releases page](https://github.com/PlayingWithFusion/SnakeEyesDocs/releases). You do not need to extract the downloaded ZIP file. + +## Flashing the SnakeEyes Image + +An 8GB or larger card is recommended. + +Use the 1.18.11 version of [Balena Etcher](https://github.com/balena-io/etcher/releases/tag/v1.18.11) to flash an image onto a Raspberry Pi. Select the downloaded `.zip` file, select your microSD card, and flash. 
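+If you would rather flash from a Linux command line than use Etcher (the Orange Pi page in this changeset notes that `dd` can be used on Linux), a rough sketch follows. The file names and `/dev/sdX` below are placeholders; substitute your actual download and SD card device, and double-check the device with `lsblk` first, since `dd` overwrites it.
+
+```bash
+# Hypothetical example: adjust the file names and device to match your setup.
+unzip snakeyes-image.zip      # the SnakeEyes release ships as a .zip containing the image
+sudo dd if=snakeyes-image.img of=/dev/sdX bs=4M status=progress conv=fsync
+```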
+ +For more detailed instructions on using Etcher, please see the [Etcher website](https://www.balena.io/etcher/). + +:::{warning} +Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Raspberry Pi. Updating to 1.18.11 will fix this issue. +::: + +Alternatively, you can use the [Raspberry Pi Imager](https://www.raspberrypi.com/software/) to flash the image. + +Select "Choose OS" and then "Use custom" to select the downloaded image file. Select your microSD card and flash. + +## Final Steps + +Insert the flashed microSD card into your Raspberry Pi and boot it up. The first boot may take a few minutes as the Pi expands the filesystem. Be sure not to unplug during this process. + +After the initial setup process, your Raspberry Pi should be configured for PhotonVision. You can verify this by making sure your Raspberry Pi and computer are connected to the same network and navigating to `http://photonvision.local:5800` in your browser on your computer. + +## Troubleshooting/Setting a Static IP + +A static IP address may be used as an alternative to the mDNS `photonvision.local` address. + +Download and run [Angry IP Scanner](https://angryip.org/download/#windows) to find PhotonVision/your coprocessor on your network. + +```{image} images/angryIP.png +``` + +Once you find it, set the IP to a desired {ref}`static IP in PhotonVision. ` + +## Updating PhotonVision + +Download the latest xxxxx-LinuxArm64.jar from [our releases page](https://github.com/PhotonVision/photonvision/releases), go to the settings tab, and upload the .jar using the Offline Update button. + +As an alternative option - Export your settings, reimage your coprocessor using the instructions above, and import your settings back in. + +## Hardware Troubleshooting + +To turn the LED lights off or on you need to modify the `ledMode` network tables entry or the `camera.setLED` of PhotonLib. + +## Support Links + +- [Website](https://www.playingwithfusion.com/productview.php?pdid=133) +- [Image](https://github.com/PlayingWithFusion/SnakeEyesDocs/releases/latest) +- [Documentation](https://github.com/PlayingWithFusion/SnakeEyesDocs/blob/master/PhotonVision/readme.md) diff --git a/docs/source/docs/installation/sw_install/snakeyes.rst b/docs/source/docs/installation/sw_install/snakeyes.rst deleted file mode 100644 index 971a32f5d1..0000000000 --- a/docs/source/docs/installation/sw_install/snakeyes.rst +++ /dev/null @@ -1,56 +0,0 @@ -SnakeEyes Installation -====================== -A Pre-Built Raspberry Pi image with configuration for `the SnakeEyes Raspberry Pi Hat `_ is available for ease of setup. - -Downloading the SnakeEyes Image -------------------------------- -Download the latest release of the SnakeEyes-specific PhotonVision Pi image from the `releases page `_. You do not need to extract the downloaded ZIP file. - -Flashing the SnakeEyes Image ----------------------------- -An 8GB or larger card is recommended. - -Use the 1.18.11 version of `Balena Etcher `_ to flash an image onto a Raspberry Pi. Select the downloaded ``.zip`` file, select your microSD card, and flash. - -For more detailed instructions on using Etcher, please see the `Etcher website `_. - -.. warning:: Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Raspberry Pi. Updating to 1.18.11 will fix this issue. - -Alternatively, you can use the `Raspberry Pi Imager `_ to flash the image. 
- -Select "Choose OS" and then "Use custom" to select the downloaded image file. Select your microSD card and flash. - -Final Steps ------------ -Insert the flashed microSD card into your Raspberry Pi and boot it up. The first boot may take a few minutes as the Pi expands the filesystem. Be sure not to unplug during this process. - -After the initial setup process, your Raspberry Pi should be configured for PhotonVision. You can verify this by making sure your Raspberry Pi and computer are connected to the same network and navigating to ``http://photonvision.local:5800`` in your browser on your computer. - -Troubleshooting/Setting a Static IP ------------------------------------ -A static IP address may be used as an alternative to the mDNS ``photonvision.local`` address. - -Download and run `Angry IP Scanner `_ to find PhotonVision/your coprocessor on your network. - -.. image:: images/angryIP.png - -Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. ` - -Updating PhotonVision ----------------------- -Download the latest xxxxx-LinuxArm64.jar from `our releases page `_, go to the settings tab, and upload the .jar using the Offline Update button. - -As an alternative option - Export your settings, reimage your coprocessor using the instructions above, and import your settings back in. - -Hardware Troubleshooting ------------------------- -To turn the LED lights off or on you need to modify the ``ledMode`` network tables entry or the ``camera.setLED`` of PhotonLib. - -Support Links -------------- - -* `Website `__ - -* `Image `__ - -* `Documentation `__ diff --git a/docs/source/docs/installation/sw_install/windows-pc.md b/docs/source/docs/installation/sw_install/windows-pc.md new file mode 100644 index 0000000000..9bfde737c5 --- /dev/null +++ b/docs/source/docs/installation/sw_install/windows-pc.md @@ -0,0 +1,41 @@ +# Windows PC Installation + +PhotonVision may be run on a Windows Desktop PC for basic testing and evaluation. + +:::{note} +You do not need to install PhotonVision on a Windows PC in order to access the webdashboard (assuming you are using an external coprocessor like a Raspberry Pi). +::: + +## Install Bonjour + +Bonjour provides more stable networking when using Windows PCs. Install [Bonjour here](https://support.apple.com/downloads/DL999/en_US/BonjourPSSetup.exe) before continuing to ensure a stable experience while using PhotonVision. + +## Installing Java + +PhotonVision requires a JDK installed and on the system path. **JDK 11 is needed** (different versions will not work). You may already have this if you have installed WPILib, but ensure that running `java -version` shows JDK 11. If not, [download and install it from here](https://adoptium.net/temurin/releases?version=11) and ensure that the new JDK is being used. + +:::{warning} +Using a JDK other than JDK11 will cause issues when running PhotonVision and is not supported. +::: + +## Downloading the Latest Stable Release of PhotonVision + +Go to the [GitHub releases page](https://github.com/PhotonVision/photonvision/releases) and download the winx64.jar file. + +## Running PhotonVision + +To run PhotonVision, open a terminal window of your choice and run the following command: + +``` +> java -jar C:\path\to\photonvision\NAME OF JAR FILE GOES HERE.jar +``` + +If your computer has a compatible webcam connected, PhotonVision should startup without any error messages. If there are error messages, your webcam isn't supported or another issue has occurred. 
If it is the latter, please open an issue on the [PhotonVision issues page](https://github.com/PhotonVision/photonvision/issues). + +:::{warning} +Using an integrated laptop camera may cause issues when trying to run PhotonVision. If you are unable to run PhotonVision on a laptop with an integrated camera, try disabling the camera's driver in Windows Device Manager. +::: + +## Accessing the PhotonVision Interface + +Once the Java backend is up and running, you can access the main vision interface by navigating to `localhost:5800` inside your browser. diff --git a/docs/source/docs/installation/sw_install/windows-pc.rst b/docs/source/docs/installation/sw_install/windows-pc.rst deleted file mode 100644 index 33dda46899..0000000000 --- a/docs/source/docs/installation/sw_install/windows-pc.rst +++ /dev/null @@ -1,35 +0,0 @@ -Windows PC Installation -======================= -PhotonVision may be run on a Windows Desktop PC for basic testing and evaluation. - -.. note:: You do not need to install PhotonVision on a Windows PC in order to access the webdashboard (assuming you are using an external coprocessor like a Raspberry Pi). - -Install Bonjour ---------------- -Bonjour provides more stable networking when using Windows PCs. Install `Bonjour here `_ before continuing to ensure a stable experience while using PhotonVision. - -Installing Java ---------------- -PhotonVision requires a JDK installed and on the system path. **JDK 11 is needed** (different versions will not work). You may already have this if you have installed WPILib, but ensure that running ``java -version`` shows JDK 11. If not, `download and install it from here `_ and ensure that the new JDK is being used. - -.. warning:: Using a JDK other than JDK11 will cause issues when running PhotonVision and is not supported. - -Downloading the Latest Stable Release of PhotonVision ------------------------------------------------------ -Go to the `GitHub releases page `_ and download the winx64.jar file. - -Running PhotonVision --------------------- -To run PhotonVision, open a terminal window of your choice and run the following command: - -.. code-block:: - - > java -jar C:\path\to\photonvision\NAME OF JAR FILE GOES HERE.jar - -If your computer has a compatible webcam connected, PhotonVision should startup without any error messages. If there are error messages, your webcam isn't supported or another issue has occurred. If it is the latter, please open an issue on the `PhotonVision issues page `_. - -.. warning:: Using an integrated laptop camera may cause issues when trying to run PhotonVision. If you are unable to run PhotonVision on a laptop with an integrated camera, try disabling the camera's driver in Windows Device Manager. - -Accessing the PhotonVision Interface ------------------------------------- -Once the Java backend is up and running, you can access the main vision interface by navigating to ``localhost:5800`` inside your browser. diff --git a/docs/source/docs/installation/updating.md b/docs/source/docs/installation/updating.md new file mode 100644 index 0000000000..76ad2ea54f --- /dev/null +++ b/docs/source/docs/installation/updating.md @@ -0,0 +1,54 @@ +# Updating PhotonVision + +PhotonVision provides many different files on a single release page. Each release contains JAR files for performing "offline updates" of a device with PhotonVision already installed, as well as full image files to "flash" to supported coprocessors. 
+ +```{image} images/release-page.png +:alt: Example GitHub release page +``` + +In the example release above, we see: + +- Image files for flashing directly to supported coprocessors. + + - Raspberry Pi 3/4/5/CM4: follow our {ref}`Raspberry Pi flashing instructions`. + - For LimeLight devices: follow our {ref}`LimeLight flashing instructions`. + - For Orange Pi 5 devices: follow our {ref}`Orange Pi flashing instructions`. + +- JAR files for the suite of supported operating systems for use with Offline Update. In general: + + - Raspberry Pi, Limelight, and Orange Pi: use images suffixed with -linuxarm64.jar. For example: {code}`photonvision-v2024.1.1-linuxarm64.jar` + - Beelink and other Intel/AMD-based Mini-PCs: use images suffixed with -linuxx64.jar. For example: {code}`photonvision-v2024.1.1-linuxx64.jar` + +## Offline Update + +Unless noted in the release page, an offline update allows you to quickly upgrade the version of PhotonVision running on a coprocessor with PhotonVision already installed on it. + +Unless otherwise noted on the release page, config files should be backward compatible with previous version of PhotonVision, and this offline update process should preserve any pipelines and calibrations previously performed. For paranoia, we suggest exporting settings from the Settings tab prior to performing an offline update. + +:::{note} +Carefully review release notes to ensure that reflashing the device (for supported devices) or other installation steps are not required, as dependencies needed for PhotonVision may change between releases +::: + +## Installing Pre-Release Versions + +Pre-release/development version of PhotonVision can be tested by installing/downloading artifacts from Github Actions (see below), which are built automatically on commits to open pull requests and to PhotonVision's `master` branch, or by {ref}`compiling PhotonVision locally `. + +:::{warning} +If testing a pre-release version of PhotonVision with a robot, PhotonLib must be updated to match the version downloaded! If not, packet schema definitions may not match and unexpected things will occur. To update PhotonLib, refer to {ref}`installing specific version of PhotonLib`. +::: + +GitHub Actions builds pre-release version of PhotonVision automatically on PRs and on each commit merged to master. To test a particular commit to master, navigate to the [PhotonVision commit list](https://github.com/PhotonVision/photonvision/commits/master/) and click on the check mark (below). Scroll to "Build / Build fat JAR - PLATFORM", click details, and then summary. From here, JAR and image files can be downloaded to be flashed or uploaded using "Offline Update". + +```{image} images/gh_actions_1.png +:alt: Github Actions Badge +``` + +```{image} images/gh_actions_2.png +:alt: Github Actions artifact list +``` + +Built JAR files (but not image files) can also be downloaded from PRs before they are merged. Navigate to the PR in GitHub, and select Checks at the top. Click on "Build" to display the same artifact list as above. + +```{image} images/gh_actions_3.png +:alt: Github Actions artifacts from PR +``` diff --git a/docs/source/docs/installation/updating.rst b/docs/source/docs/installation/updating.rst deleted file mode 100644 index f5a98fbec1..0000000000 --- a/docs/source/docs/installation/updating.rst +++ /dev/null @@ -1,49 +0,0 @@ -Updating PhotonVision -===================== - -PhotonVision provides many different files on a single release page. 
Each release contains JAR files for performing "offline updates" of a device with PhotonVision already installed, as well as full image files to "flash" to supported coprocessors. - -.. image:: images/release-page.png - :alt: Example GitHub release page - -In the example release above, we see: - -- Image files for flashing directly to supported coprocessors. - - - Raspberry Pi 3/4/5/CM4: follow our :ref:`Raspberry Pi flashing instructions`. - - For LimeLight devices: follow our :ref:`LimeLight flashing instructions`. - - For Orange Pi 5 devices: follow our :ref:`Orange Pi flashing instructions`. - -- JAR files for the suite of supported operating systems for use with Offline Update. In general: - - - Raspberry Pi, Limelight, and Orange Pi: use images suffixed with -linuxarm64.jar. For example: :code:`photonvision-v2024.1.1-linuxarm64.jar` - - Beelink and other Intel/AMD-based Mini-PCs: use images suffixed with -linuxx64.jar. For example: :code:`photonvision-v2024.1.1-linuxx64.jar` - -Offline Update --------------- - -Unless noted in the release page, an offline update allows you to quickly upgrade the version of PhotonVision running on a coprocessor with PhotonVision already installed on it. - -Unless otherwise noted on the release page, config files should be backward compatible with previous version of PhotonVision, and this offline update process should preserve any pipelines and calibrations previously performed. For paranoia, we suggest exporting settings from the Settings tab prior to performing an offline update. - -.. note:: Carefully review release notes to ensure that reflashing the device (for supported devices) or other installation steps are not required, as dependencies needed for PhotonVision may change between releases - -Installing Pre-Release Versions -------------------------------- - -Pre-release/development version of PhotonVision can be tested by installing/downloading artifacts from Github Actions (see below), which are built automatically on commits to open pull requests and to PhotonVision's ``master`` branch, or by :ref:`compiling PhotonVision locally `. - -.. warning:: If testing a pre-release version of PhotonVision with a robot, PhotonLib must be updated to match the version downloaded! If not, packet schema definitions may not match and unexpected things will occur. - -Github Actions builds pre-release version of PhotonVision automatically on PRs and on each commit merged to master. To test a particular commit to master, navigate to the `PhotonVision commit list `_ and click on the check mark (below). Scroll to "Build / Build fat JAR - PLATFORM", click details, and then summary. From here, JAR and image files can be downloaded to be flashed or uploaded using "Offline Update". - -.. image:: images/gh_actions_1.png - :alt: Github Actions Badge - -.. image:: images/gh_actions_2.png - :alt: Github Actions artifact list - -Built JAR files (but not image files) can also be downloaded from PRs before they are merged. Navigate to the PR in GitHub, and select Checks at the top. Click on "Build" to display the same artifact list as above. - -.. image:: images/gh_actions_3.png - :alt: Github Actions artifacts from PR diff --git a/docs/source/docs/installation/wiring.md b/docs/source/docs/installation/wiring.md new file mode 100644 index 0000000000..c20fc9916d --- /dev/null +++ b/docs/source/docs/installation/wiring.md @@ -0,0 +1,42 @@ +# Wiring + +## Off-Robot Wiring + +Plugging your coprocessor into the wall via a power brick will suffice for off robot wiring. 
+ +:::{note} +Please make sure your chosen power supply can provide enough power for your coprocessor. Undervolting (where enough power isn't being supplied) can cause many issues. +::: + +## On-Robot Wiring + +:::{note} +We recommend users use the [SnakeEyes Pi Hat](https://www.playingwithfusion.com/productview.php?pdid=133) as it provides passive power over ethernet (POE) and other useful features to simplify wiring and make your life easier. +::: + +### Recommended: Coprocessor with Passive POE (Gloworm, Pi with SnakeEyes, Limelight) + +1. Plug the [passive POE injector](https://www.revrobotics.com/rev-11-1210/) into the coprocessor and wire it to PDP/PDH (NOT the VRM). +2. Add a breaker to relevant slot in your PDP/PDH +3. Run an ethernet cable from the passive POE injector to your network switch / radio (we *STRONGLY* recommend the usage of a network switch, see the [networking](networking.md) section for more info.) + +### Coprocessor without Passive POE + +1a. Option 1: Get a micro USB (may be USB-C if using a newer Pi) pigtail cable and connect the wire ends to a regulator like [this](https://www.pololu.com/product/4082). Then, wire the regulator into your PDP/PDH and the Micro USB / USB C into your coprocessor. + +1b. Option 2: Use a USB power bank to power your coprocessor. Refer to this year's robot rulebook on legal implementations of this. + +2. Run an ethernet cable from your Pi to your network switch / radio (we *STRONGLY* recommend the usage of a network switch, see the [networking](networking.md) section for more info.) + +This diagram shows how to use the recommended regulator to power a coprocessor. + +```{image} images/pololu-diagram.png +:alt: A flowchart-type diagram showing how to connect wires from the PDP or PDH to +: the recommended voltage regulator and then a Coprocessor. +``` + +:::{note} +The regulator comes with optional screw terminals that may be used to connect the PDP/PDH and Coprocessor power wires if you do not wish to solder them. +::: + +Once you have wired your coprocessor, you are now ready to install PhotonVision. diff --git a/docs/source/docs/installation/wiring.rst b/docs/source/docs/installation/wiring.rst deleted file mode 100644 index 3e00f3d0af..0000000000 --- a/docs/source/docs/installation/wiring.rst +++ /dev/null @@ -1,42 +0,0 @@ -Wiring -====== - - -Off-Robot Wiring ----------------- - -Plugging your coprocessor into the wall via a power brick will suffice for off robot wiring. - -.. note:: Please make sure your chosen power supply can provide enough power for your coprocessor. Undervolting (where enough power isn't being supplied) can cause many issues. - - -On-Robot Wiring ---------------- - -.. note:: We recommend users use the `SnakeEyes Pi Hat `_ as it provides passive power over ethernet (POE) and other useful features to simplify wiring and make your life easier. - -Recommended: Coprocessor with Passive POE (Gloworm, Pi with SnakeEyes, Limelight) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. Plug the `passive POE injector `_ into the coprocessor and wire it to PDP/PDH (NOT the VRM). - -2. Add a breaker to relevant slot in your PDP/PDH - -3. Run an ethernet cable from the passive POE injector to your network switch / radio (we *STRONGLY* recommend the usage of a network switch, see the `networking `_ section for more info.) - -Coprocessor without Passive POE -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -1a. 
Option 1: Get a micro USB (may be USB-C if using a newer Pi) pigtail cable and connect the wire ends to a regulator like `this `_. Then, wire the regulator into your PDP/PDH and the Micro USB / USB C into your coprocessor. - -1b. Option 2: Use a USB power bank to power your coprocessor. Refer to this year's robot rulebook on legal implementations of this. - -2. Run an ethernet cable from your Pi to your network switch / radio (we *STRONGLY* recommend the usage of a network switch, see the `networking `_ section for more info.) - -This diagram shows how to use the recommended regulator to power a coprocessor. - -.. image:: images/pololu-diagram.png :alt: A flowchart-type diagram showing how to connect wires from the PDP or PDH to the recommended voltage regulator and then a Coprocessor. - -.. note:: The regulator comes with optional screw terminals that may be used to connect the PDP/PDH and Coprocessor power wires if you do not wish to solder them. - -Once you have wired your coprocessor, you are now ready to install PhotonVision. diff --git a/docs/source/docs/integration/advancedStrategies.md b/docs/source/docs/integration/advancedStrategies.md new file mode 100644 index 0000000000..54f66f1244 --- /dev/null +++ b/docs/source/docs/integration/advancedStrategies.md @@ -0,0 +1,68 @@ +# Advanced Strategies + +Advanced strategies for using vision processing results involve working with the robot's *pose* on the field. + +A *pose* is a combination of an X/Y coordinate and an angle describing where the robot's front is pointed. A pose is always considered *relative* to some fixed point on the field. + +WPILib provides a [Pose2d](https://docs.wpilib.org/en/stable/docs/software/advanced-controls/geometry/pose.html) class to describe poses in software. + +PhotonVision can supply correcting information to keep estimates of *pose* accurate over a full match. + +## Knowledge and Equipment Needed + +- A Coprocessor running PhotonVision + \- Accurate camera calibration to support "3D mode" required +- A Drivetrain with wheels and sensors + \- Sufficient sensors to measure wheel rotation + \- Capable of closed-loop velocity control +- A gyroscope or IMU measuring actual robot heading +- Experience using some path-planning library + +## Robot Poses from the Camera + +When using 3D mode in PhotonVision, an additional step is run to estimate the 3D position of the camera, relative to one or more AprilTags. + +This process does not produce a *unique* solution. There are multiple possible camera positions which might explain the image it observed. Additionally, the camera is rarely mounted in the exact center of a robot. + +For these reasons, the 3D information must be filtered and transformed before it can describe the robot's pose. + +PhotonLib provides {ref}`a utility class to assist with this process on the roboRIO `. Alternatively, {ref}`a "multi-tag" strategy can do this process on the coprocessor. `. + +## Field-Relative Pose Estimation + +The camera's guess of the robot pose should generally be *fused* with other sensor readings. + +WPILib provides [a set of pose estimation classes](https://docs.wpilib.org/en/stable/docs/software/advanced-controls/state-space/state-space-pose-estimators.html) for doing this work. + +## I have a Pose Estimate, Now What? + +### Triggering Actions Automatically + +A simple way to use a pose estimate is to activate robot functions automatically when in the correct spot on the field. + +```{eval-rst} +.. tab-set-code:: + + .. 
code-block:: Java + + Pose3d robotPose; + boolean launcherSpinCmd; + + // ... + + if(robotPose.X() < 1.5){ + // Near blue alliance wall, start spinning the launcher wheel + launcherSpinCmd = True; + } else { + // Far away, no need to run launcher. + launcherSpinCmd = False; + } + + // ... +``` + +### PathPlanning + +A common, but more complex usage of a pose estimate is an input to a path-following algorithm. Specifically, the pose estimate is used to correct for the robot straying off of the pre-defined path. + +See the {ref}`Pose Estimation ` example for details on integrating this. diff --git a/docs/source/docs/integration/advancedStrategies.rst b/docs/source/docs/integration/advancedStrategies.rst deleted file mode 100644 index a2c46bf4c3..0000000000 --- a/docs/source/docs/integration/advancedStrategies.rst +++ /dev/null @@ -1,43 +0,0 @@ -Advanced Strategies -=================== - -Advanced strategies for using vision processing results involve working with the robot's *pose* on the field. A *pose* is a combination an X/Y coordinate, and an angle describing where the robot's front is pointed. It is always considered *relative* to some fixed point on the field. - -WPILib provides a `Pose2d `_ class to describe poses in software. - -Knowledge and Equipment Needed ------------------------------- - -- A Coprocessor running PhotonVision - - Accurate camera calibration to support "3D mode" required -- A Drivetrain with wheels and sensors - - Sufficient sensors to measure wheel rotation - - Capable of closed-loop velocity control -- A gyroscope or IMU measuring actual robot heading -- Experience using some path-planning library (WPILib is our recommendation) - -Path Planning in a Target-Centered Reference Frame --------------------------------------------------- - -When using 3D mode in PhotonVision, the `SolvePNP Algorithm `_ is used to deduce the *camera\'s* position in a 3D coordinate system centered on the target itself. - -A simple algorithm for using this measurement is: - -#. Assume your robot needs to be at a fixed ``Pose2D`` *relative to the target*. -#. When triggered: - #. Read the most recent vision measurement - this is your *actual* pose. - #. Generate a simple trajectory to the goal position - #. Execute the trajectory - -.. note:: There is not currently an example demonstrating this technique. - -Global Pose Estimation ----------------------- - -A more complex way to utilize a camera-supplied ``Pose2D`` is to incorporate it into an estimation of the robot's ``Pose2D`` in a global field reference frame. - -When using this strategy, the measurements made by the camera are *fused* with measurements from other sensors, a model of expected robot behavior, and a matrix of weights that describes how trustworthy each sensor is. The result is a *best-guess* at the current pose on the field. - -In turn, this best-guess position is used to path plan to the known positions on the field, which may or may not have vision targets nearby. - -See the :ref:`Pose Estimation ` example for more information. diff --git a/docs/source/docs/integration/aprilTagStrategies.rst b/docs/source/docs/integration/aprilTagStrategies.rst deleted file mode 100644 index 420baf4c00..0000000000 --- a/docs/source/docs/integration/aprilTagStrategies.rst +++ /dev/null @@ -1,48 +0,0 @@ -AprilTag Strategies -==================== - -.. note:: The same strategies covered in the simple and advanced strategy sections still apply to AprilTags, and we encourage you to read them first. 
This page will discuss the specific nuances to using AprilTags. - -Simple Strategies ------------------ - -Prior to the introduction of AprilTags, the most common vision strategy for teams was to use the yaw of the detected target in order to turn to the target, and then score. This is still possible with AprilTags as the yaw of the tag is reported. Similarly, getting the distance to the target via trigonometry will also work. This is discussed in greater detail in the previous page. - -Advanced Strategies -------------------- -AprilTags allows you find the robot pose on the field using data from the tags. A pose is a combination an X/Y coordinate, and an angle describing where the robot’s front is pointed. It is always considered relative to some fixed point on the field. - -Knowledge and Equipment Needed -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Knowledge - -* How to tune an AprilTag Pipeline (found in the pipeline tuning section) - -Equipment - -* A Coprocessor running PhotonVision - Accurate camera calibration to support “3D mode” required - -* A Drivetrain with wheels and sensors (Sufficient sensors to measure wheel rotation and capable of closed-loop velocity control) - -* A gyroscope or IMU measuring actual robot heading - -Global Pose Estimation / Pose Estimation Strategies -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. note:: See the previous page for more general information. Most of the information is the same except now the camera is supplying a ``Pose3D``. - -The nature of how AprilTags will be laid out makes it very likely that you will get multiple pose measurements within a single frame from seeing multiple targets. This requires strategies to fuse these observations together and get a "best guess" as to where your robot is. The best way to do this is to use the corners from all visible AprilTags to estimate the robot's pose. This is done by using the ``PhotonPoseEstimator`` class and the "MULTI_TAG_PNP_ON_COPROCESSOR" strategy. Additional strategies include: - -* A camera seeing multiple targets, taking the average of all the returned poses -* A camera seeing one target, with an assumed height off the ground, picking the pose which places it to the assumed height -* A camera seeing one target, and picking a pose most similar to the most recently observed pose -* A camera seeing one target, and picking a pose most similar to one provided externally (ie, from previous loop's odometry) -* A camera seeing one target, and picking the pose with the lowest ambiguity. - -PhotonVision supports all of these different strategies via our ``PhotonPoseEstimator`` class that allows you to select one of the strategies above and get the relevant pose estimation. - -Tuning Pose Estimators -^^^^^^^^^^^^^^^^^^^^^^ - -Coming soon! -TODO: Add this back in once simposeest example is added. diff --git a/docs/source/docs/integration/background.rst b/docs/source/docs/integration/background.md similarity index 79% rename from docs/source/docs/integration/background.rst rename to docs/source/docs/integration/background.md index 1615aacea2..1341e3acdb 100644 --- a/docs/source/docs/integration/background.rst +++ b/docs/source/docs/integration/background.md @@ -1,8 +1,6 @@ -Vision - Robot Integration Background -===================================== +# Vision - Robot Integration Background -Vision Processing's Purpose ---------------------------- +## Vision Processing's Purpose Each year, the FRC game requires a fundamental operation: **Align the Robot to a Goal**. 
@@ -14,8 +12,8 @@ Software strategies can be used to help augment the ability of a human operator,
 There are many valid strategies for doing this transformation. Picking a strategy is a balancing act between:
 
-  1. Available team resources (time, programming skills, previous experience)
-  2. Precision of alignment required
-  3. Team willingness to take on risk
+> 1. Available team resources (time, programming skills, previous experience)
+> 2. Precision of alignment required
+> 3. Team willingness to take on risk
 
 Simple strategies are low-risk - they require comparatively little effort to implement and tune, but have hard limits on the complexity of motion they can control on the robot. Advanced methods allow for more complex and precise movement, but take more effort to implement and tune. For this reason, it is more risky to attempt to use them.
diff --git a/docs/source/docs/integration/index.md b/docs/source/docs/integration/index.md
new file mode 100644
index 0000000000..b5bc243395
--- /dev/null
+++ b/docs/source/docs/integration/index.md
@@ -0,0 +1,9 @@
+# Robot Integration
+
+```{toctree}
+:maxdepth: 2
+
+background
+simpleStrategies
+advancedStrategies
+```
diff --git a/docs/source/docs/integration/index.rst b/docs/source/docs/integration/index.rst
deleted file mode 100644
index c8b9f8787a..0000000000
--- a/docs/source/docs/integration/index.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Robot Integration
-=================
-
-.. toctree::
-   :maxdepth: 2
-
-   background
-   simpleStrategies
-   advancedStrategies
-   aprilTagStrategies
diff --git a/docs/source/docs/integration/simpleStrategies.md b/docs/source/docs/integration/simpleStrategies.md
new file mode 100644
index 0000000000..1e90b5c52b
--- /dev/null
+++ b/docs/source/docs/integration/simpleStrategies.md
@@ -0,0 +1,32 @@
+# Simple Strategies
+
+Simple strategies for using vision processor outputs involve using the target's position in the 2D image to infer *range* and *angle* to a particular AprilTag.
+
+## Knowledge and Equipment Needed
+
+- A Coprocessor running PhotonVision
+- A Drivetrain with wheels
+- An AprilTag to aim at
+
+## Angle Alignment
+
+The simplest way to align a robot to an AprilTag is to rotate the drivetrain until the tag is centered in the camera image. To do this,
+
+1. Read the current yaw angle to the AprilTag from the vision Coprocessor.
+2. If too far off to one side, command the drivetrain to rotate in the opposite direction to compensate.
+
+See the {ref}`Aiming at a Target ` example for more information.
+
+:::{note}
+This works if the camera is centered on the robot, which is the easiest setup from a software perspective. If the camera is not centered, take a peek at the next example - it shows how to account for an offset.
+:::
+
+## Adding Range Alignment
+
+By looking at the position of the AprilTag in the "vertical" direction in the image, and applying some trigonometry, the distance between the camera and the AprilTag can be deduced.
+
+1. Read the current pitch angle to the AprilTag from the vision coprocessor.
+2. Do math to calculate the distance to the AprilTag (see the sketch below).
+3. If too far in one direction, command the drivetrain to travel in the opposite direction to compensate.
+
+This can be done simultaneously while aligning to the desired angle.
+
+See the {ref}`Aim and Range ` example for more information.
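+For reference, here is a minimal sketch of the distance math in step 2 above, using PhotonLib's `PhotonUtils.calculateDistanceToTargetMeters()` helper. It assumes `camera` is a `PhotonCamera` and that the WPILib `Units` and PhotonLib `PhotonUtils` classes are imported; the camera height, tag height, and camera pitch values are made-up placeholders you would replace with measurements from your own robot.
+
+```java
+// Placeholder geometry - measure these on your robot.
+final double CAMERA_HEIGHT_METERS = 0.50;                          // lens height off the carpet
+final double TARGET_HEIGHT_METERS = 1.45;                          // height of the AprilTag center
+final double CAMERA_PITCH_RADIANS = Units.degreesToRadians(15.0);  // camera tilt up from horizontal
+
+var result = camera.getLatestResult();
+if (result.hasTargets()) {
+  // d = (h_target - h_camera) / tan(cameraPitch + targetPitch)
+  double rangeMeters = PhotonUtils.calculateDistanceToTargetMeters(
+      CAMERA_HEIGHT_METERS,
+      TARGET_HEIGHT_METERS,
+      CAMERA_PITCH_RADIANS,
+      Units.degreesToRadians(result.getBestTarget().getPitch()));
+  // Drive forward or backward until rangeMeters matches the desired range.
+}
+```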
diff --git a/docs/source/docs/integration/simpleStrategies.rst b/docs/source/docs/integration/simpleStrategies.rst deleted file mode 100644 index e6ef936533..0000000000 --- a/docs/source/docs/integration/simpleStrategies.rst +++ /dev/null @@ -1,40 +0,0 @@ -Simple Strategies -================= - -Simple strategies for using vision processor outputs involve using the target's position in the 2D image to infer *range* and *angle* to the target. - -Knowledge and Equipment Needed ------------------------------- - -- A Coprocessor running PhotonVision -- A Drivetrain with wheels - -Angle Alignment ---------------- - -The simplest way to use a vision processing result is to first determine how far left or right in the image the vision target should be for your robot to be "aligned" to the target. Then, - -1. Read the current angle to the target from the vision Coprocessor. -2. If too far in one direction, command the drivetrain to rotate in the opposite direction to compensate. - -See the :ref:`Aiming at a Target ` example for more information. - -.. note:: Sometimes, these strategies have also involved incorporating a gyroscope. This can be necessary due to the high latency of vision processing algorithms. However, advancements in the tools available (including PhotonVision) has made that unnecessary for most applications. - -Range Alignment ---------------- - -By looking at the position of the target in the "vertical" direction in the image, and applying some trionometery, the distance between the robot and the camera can be deduced. - -1. Read the current distance to the target from the vision coprocessor. -2. If too far in one direction, command the drivetrain to travel in the opposite direction to compensate. - -See the :ref:`Getting in Range of the Target ` example for more information. - - -Angle + Range -------------- - -Since the previous two alignment strategies work on independent axes of the robot, there's no reason you can't do them simultaneously. - -See the :ref:`Aim and Range ` example for more information. diff --git a/docs/source/docs/objectDetection/about-object-detection.md b/docs/source/docs/objectDetection/about-object-detection.md new file mode 100644 index 0000000000..b40667e645 --- /dev/null +++ b/docs/source/docs/objectDetection/about-object-detection.md @@ -0,0 +1,47 @@ +# About Object Detection + +## How does it work? + +PhotonVision supports object detection using neural network accelerator hardware built into Orange Pi 5/5+ coprocessors. The Neural Processing Unit, or NPU, is [used by PhotonVision](https://github.com/PhotonVision/rknn_jni/tree/main) to massively accelerate certain math operations like those needed for running ML-based object detection. + +For the 2024 season, PhotonVision ships with a **pre-trained NOTE detector** (shown above), as well as a mechanism for swapping in custom models. Future development will focus on enabling lower friction management of multiple custom models. + +```{image} images/notes-ui.png +``` + +## Tracking Objects + +Before you get started with object detection, ensure that you have followed the previous sections on installation, wiring, and networking. Next, open the Web UI, go to the top right card, and switch to the “Object Detection” type. You should see a screen similar to the image above. + +PhotonVision currently ships with a NOTE detector based on a [YOLOv5 model](https://docs.ultralytics.com/yolov5/). 
This model is trained to detect one or more object "classes" (such as cars, stoplights, or in our case, NOTES) in an input image. For each detected object, the model outputs a bounding box around where in the image the object is located, what class the object belongs to, and a unitless confidence between 0 and 1.
+
+:::{note}
+This model output means that while it's fairly easy to say that "this rectangle probably contains a NOTE", we don't have any information about the NOTE's orientation or location. Further math in user code would be required to make estimates about where an object is physically located relative to the camera.
+:::
+
+## Tuning and Filtering
+
+Compared to other pipelines, object detection exposes very few tuning handles. The Confidence slider changes the minimum confidence that the model needs to have in a given detection to consider it valid, as a number between 0 and 1 (with 0 meaning completely uncertain and 1 meaning maximally certain).
+
+```{raw} html
+
+```
+
+The same area, aspect ratio, and target orientation/sort parameters from {ref}`reflective pipelines ` are also exposed in the object detection card.
+
+## Training Custom Models
+
+Coming soon!
+
+## Uploading Custom Models
+
+:::{warning}
+PhotonVision currently ONLY supports YOLOv5 models trained and converted to `.rknn` format for RK3588 CPUs! Other models require different post-processing code and will NOT work. The model conversion process is also highly particular. Proceed with care.
+:::
+
+Our [pre-trained NOTE model](https://github.com/PhotonVision/photonvision/blob/master/photon-server/src/main/resources/models/note-640-640-yolov5s.rknn) is automatically extracted from the JAR when PhotonVision starts, but only if files named "note-640-640-yolov5s.rknn" and "labels.txt" do not already exist in the folder `photonvision_config/models/`. This technically allows power users to replace the model and label files with new ones without rebuilding Photon from source and uploading a new JAR.
+
+Use a program like WinSCP or FileZilla to access your coprocessor's filesystem, and copy the new `.rknn` model file into /home/pi. Next, SSH into the coprocessor and `sudo mv /path/to/new/model.rknn /opt/photonvision/photonvision_config/models/note-640-640-yolov5s.rknn`. Repeat this process with the labels file, which should contain one line per label the model outputs, with no trailing newline. Next, restart PhotonVision via the web UI.
diff --git a/docs/source/docs/objectDetection/about-object-detection.rst b/docs/source/docs/objectDetection/about-object-detection.rst
deleted file mode 100644
index f054c2c8be..0000000000
--- a/docs/source/docs/objectDetection/about-object-detection.rst
+++ /dev/null
@@ -1,48 +0,0 @@
-About Object Detection
-======================
-
-How does it work?
-^^^^^^^^^^^^^^^^^
-
-PhotonVision supports object detection using neural network accelerator hardware built into Orange Pi 5/5+ coprocessors. The Neural Processing Unit, or NPU, is `used by PhotonVision `_ to massively accelerate certain math operations like those needed for running ML-based object detection.
-
-For the 2024 season, PhotonVision ships with a **pre-trained NOTE detector** (shown above), as well as a mechanism for swapping in custom models. Future development will focus on enabling lower friction management of multiple custom models.
-
-.. 
image:: images/notes-ui.png - -Tracking Objects -^^^^^^^^^^^^^^^^ - -Before you get started with object detection, ensure that you have followed the previous sections on installation, wiring, and networking. Next, open the Web UI, go to the top right card, and switch to the “Object Detection” type. You should see a screen similar to the image above. - -PhotonVision currently ships with a NOTE detector based on a `YOLOv5 model `_. This model is trained to detect one or more object "classes" (such as cars, stoplights, or in our case, NOTES) in an input image. For each detected object, the model outputs a bounding box around where in the image the object is located, what class the object belongs to, and a unitless confidence between 0 and 1. - -.... note:: This model output means that while its fairly easy to say that "this rectangle probably contains a NOTE", we don't have any information about the NOTE's orientation or location. Further math in user code would be required to make estimates about where an object is physically located relative to the camera. - -Tuning and Filtering -^^^^^^^^^^^^^^^^^^^^ - -Compared to other pipelines, object detection exposes very few tuning handles. The Confidence slider changes the minimum confidence that the model needs to have in a given detection to consider it valid, as a number between 0 and 1 (with 0 meaning completely uncertain and 1 meaning maximally certain). - -.. raw:: html - - - -The same area, aspect ratio, and target orientation/sort parameters from :ref:`reflective pipelines ` are also exposed in the object detection card. - -Training Custom Models -^^^^^^^^^^^^^^^^^^^^^^ - -Coming soon! - -Uploading Custom Models -^^^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: PhotonVision currently ONLY supports YOLOv5 models trained and converted to ``.rknn`` format for RK3588 CPUs! Other models require different post-processing code and will NOT work. The model conversion process is also highly particular. Proceed with care. - -Our `pre-trained NOTE model `_ is automatically extracted from the JAR when PhotonVision starts, only if a file named “note-640-640-yolov5s.rknn” and "labels.txt" does not exist in the folder ``photonvision_config/models/``. This technically allows power users to replace the model and label files with new ones without rebuilding Photon from source and uploading a new JAR. - -Use a program like WinSCP or FileZilla to access your coprocessor's filesystem, and copy the new ``.rknn`` model file into /home/pi. Next, SSH into the coprocessor and ``sudo mv /path/to/new/model.rknn /opt/photonvision/photonvision_config/models/note-640-640-yolov5s.rknn``. Repeat this process with the labels file, which should contain one line per label the model outputs with no training newline. Next, restart PhotonVision via the web UI. diff --git a/docs/source/docs/objectDetection/index.md b/docs/source/docs/objectDetection/index.md new file mode 100644 index 0000000000..694b8c1bd3 --- /dev/null +++ b/docs/source/docs/objectDetection/index.md @@ -0,0 +1,8 @@ +# Object Detection + +```{toctree} +:maxdepth: 0 +:titlesonly: true + +about-object-detection +``` diff --git a/docs/source/docs/objectDetection/index.rst b/docs/source/docs/objectDetection/index.rst deleted file mode 100644 index 0bb65ca5a0..0000000000 --- a/docs/source/docs/objectDetection/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Object Detection -================ - -.. 
toctree:: - :maxdepth: 0 - :titlesonly: - - about-object-detection diff --git a/docs/source/docs/pipelines/about-pipelines.rst b/docs/source/docs/pipelines/about-pipelines.md similarity index 84% rename from docs/source/docs/pipelines/about-pipelines.rst rename to docs/source/docs/pipelines/about-pipelines.md index 560deff330..744a6b807d 100644 --- a/docs/source/docs/pipelines/about-pipelines.rst +++ b/docs/source/docs/pipelines/about-pipelines.md @@ -1,48 +1,42 @@ -:orphan: +--- +orphan: true +--- -About Pipelines -=============== +# About Pipelines -What is a pipeline? -^^^^^^^^^^^^^^^^^^^ +## What is a pipeline? A vision pipeline represents a series of steps that are used to acquire an image, process it, and analyzing it to find a target. In most FRC games, this means processing an image in order to detect a piece of retroreflective tape or an AprilTag. -Types of Pipelines -^^^^^^^^^^^^^^^^^^ +## Types of Pipelines -Reflective ----------- +### Reflective This is the most common pipeline type and it is based on detecting targets with retroreflective tape. In the contours tab of this pipeline type, you can filter the area, width/height ratio, fullness, degree of speckle rejection. -Colored Shape -------------- +### Colored Shape This pipeline type is based on detecting different shapes like circles, triangles, quadrilaterals, or a polygon. An example usage would be detecting yellow PowerCells from the 2020 FRC game. You can read more about the specific settings available in the contours page. -AprilTag / AruCo ----------------- +### AprilTag / AruCo This pipeline type is based on detecting AprilTag fiducial markers. More information about AprilTags can be found in the WPILib documentation. While being more performance intensive than the reflective and colored shape pipeline, it has the benefit of providing easy to use 3D pose information which allows localization. -.. note:: In order to get 3D Pose data about AprilTags, you are required to :ref:`calibrate your camera`. +:::{note} +In order to get 3D Pose data about AprilTags, you are required to {ref}`calibrate your camera`. +::: -Note About Multiple Cameras and Pipelines -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +## Note About Multiple Cameras and Pipelines When using more than one camera, it is important to keep in mind that all cameras run one pipeline each, all publish to NT, and all send both streams. This will have a noticeable affect on performance and we recommend users limit themselves to 1-2 cameras per coprocessor. -Pipeline Steps -^^^^^^^^^^^^^^ +## Pipeline Steps + Reflective and Colored Shape Pipelines have 4 steps (represented as 4 tabs): 1. Input: This tab allows the raw camera image to be modified before it gets processed. Here, you can set exposure, brightness, gain, orientation, and resolution. - 2. Threshold (Only Reflective and Colored Shape): This tabs allows you to filter our specific colors/pixels in your camera stream through HSV tuning. The end goal here is having a black and white image that will only have your target lit up. - 3. Contours: After thresholding, contiguous white pixels are grouped together, and described by a curve that outlines the group. This curve is called a "contour" which represent various targets on your screen. Regardless of type, you can filter how the targets are grouped, their intersection, and how the targets are sorted. Other available filters will change based on different pipeline types. - 4. 
Output: Now that you have filtered all of your contours, this allows you to manipulate the detected target via orientation, the offset point, and offset. AprilTag / AruCo Pipelines have 3 steps: diff --git a/docs/source/docs/pipelines/index.md b/docs/source/docs/pipelines/index.md new file mode 100644 index 0000000000..7d8ba7dbd2 --- /dev/null +++ b/docs/source/docs/pipelines/index.md @@ -0,0 +1,7 @@ +# Pipelines + +```{toctree} +about-pipelines +input +output +``` diff --git a/docs/source/docs/pipelines/index.rst b/docs/source/docs/pipelines/index.rst deleted file mode 100644 index cf9f55811b..0000000000 --- a/docs/source/docs/pipelines/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Pipelines -========= - -.. toctree:: - - about-pipelines - input - output diff --git a/docs/source/docs/pipelines/input.rst b/docs/source/docs/pipelines/input.md similarity index 77% rename from docs/source/docs/pipelines/input.rst rename to docs/source/docs/pipelines/input.md index 553dbf02f4..ec93c1218e 100644 --- a/docs/source/docs/pipelines/input.rst +++ b/docs/source/docs/pipelines/input.md @@ -1,10 +1,8 @@ -Camera Tuning / Input -===================== +# Camera Tuning / Input PhotonVision's "Input" tab contains settings that affect the image captured by the currently selected camera. This includes camera exposure and brightness, as well as resolution and orientation. -Resolution ----------- +## Resolution Resolution changes the resolution of the image captured. While higher resolutions are often more accurate than lower resolutions, they also run at a slower update rate. @@ -12,20 +10,21 @@ When using the reflective/colored shape pipeline, detection should be run as low When using the AprilTag pipeline, you should try to use as high of a resolution as you can while still maintaining a reasonable FPS measurement. This is because higher resolution allows you to detect tags with higher accuracy and from larger distances. -Exposure and brightness ------------------------ +## Exposure and brightness Camera exposure and brightness control how bright the captured image will be, although they function differently. Camera exposure changes how long the camera shutter lets in light, which changes the overall brightness of the captured image. This is in contrast to brightness, which is a post-processing effect that boosts the overall brightness of the image at the cost of desaturating colors (making colors look less distinct). -.. important:: For all pipelines, exposure time should be set as low as possible while still allowing for the target to be reliably tracked. This allows for faster processing as decreasing exposure will increase your camera FPS. +:::{important} +For all pipelines, exposure time should be set as low as possible while still allowing for the target to be reliably tracked. This allows for faster processing as decreasing exposure will increase your camera FPS. +::: For reflective pipelines, after adjusting exposure and brightness, the target should be lit green (or the color of the vision tracking LEDs used). The more distinct the color of the target, the more likely it will be tracked reliably. -.. note:: Unlike with retroreflective tape, AprilTag tracking is not very dependent on lighting consistency. If you have trouble detecting tags due to low light, you may want to try increasing exposure, but this will likely decrease your achievable framerate. +:::{note} +Unlike with retroreflective tape, AprilTag tracking is not very dependent on lighting consistency. 
If you have trouble detecting tags due to low light, you may want to try increasing exposure, but this will likely decrease your achievable framerate. +::: - -AprilTags and Motion Blur -^^^^^^^^^^^^^^^^^^^^^^^^^ +### AprilTags and Motion Blur For AprilTag pipelines, your goal is to reduce the "motion blur" as much as possible. Motion blur is the visual streaking/smearing on the camera stream as a result of movement of the camera or object of focus. You want to mitigate this as much as possible because your robot is constantly moving and you want to be able to read as many tags as you possibly can. The possible solutions to this include: @@ -33,15 +32,14 @@ For AprilTag pipelines, your goal is to reduce the "motion blur" as much as poss 2. Using a global shutter (as opposed to rolling shutter) camera. This should eliminate most, if not all motion blur. 3. Only rely on tags when not moving. -.. image:: images/motionblur.gif - :align: center +```{image} images/motionblur.gif +:align: center +``` -Orientation ------------ +## Orientation Orientation can be used to rotate the image prior to vision processing. This can be useful for cases where the camera is not oriented parallel to the ground. Do note that this operation can in some cases significantly reduce FPS. -Stream Resolution ------------------ +## Stream Resolution This changes the resolution which is used to stream frames from PhotonVision. This does not change the resolution used to perform vision processing. This is useful to reduce bandwidth consumption on the field. In some high-resolution cases, decreasing stream resolution can increase processing FPS. diff --git a/docs/source/docs/pipelines/output.rst b/docs/source/docs/pipelines/output.md similarity index 77% rename from docs/source/docs/pipelines/output.rst rename to docs/source/docs/pipelines/output.md index 3ec552c5a9..072cf1924e 100644 --- a/docs/source/docs/pipelines/output.rst +++ b/docs/source/docs/pipelines/output.md @@ -1,24 +1,21 @@ -Output -====== +# Output The output card contains sections for target manipulation and offset modes. -Target Manipulation -------------------- +## Target Manipulation In this section, the Target Offset Point changes where the "center" of the target is. This can be useful if the pitch/yaw of the middle of the top edge of the target is desired, rather than the center of mass of the target. The "top"/"bottom"/"left"/"right" of the target are defined by the Target Orientation selection. For example, a 400x200px target in landscape mode would have the "top" offset point located at the middle of the uppermost long edge of the target, while in portrait mode the "top" offset point would be located in the middle of the topmost short edge (in this case, either the left or right sides). -This section also includes a switch to enable processing and sending multiple targets, up to 5, simultaneously. This information is available through PhotonLib. Note that the :code:`GetPitch`/:code:`GetYaw` methods will report the pitch/yaw of the "best" (lowest indexed) target. +This section also includes a switch to enable processing and sending multiple targets, up to 5, simultaneously. This information is available through PhotonLib. Note that the {code}`GetPitch`/{code}`GetYaw` methods will report the pitch/yaw of the "best" (lowest indexed) target. -.. raw:: html +```{raw} html + +``` - - -Robot Offset ------------- +## Robot Offset PhotonVision offers both single and dual point offset modes. 
In single point mode, the "Take Point" button will set the crosshair location to the center of the current "best" target. diff --git a/docs/source/docs/programming/index.md b/docs/source/docs/programming/index.md new file mode 100644 index 0000000000..3fe28f5843 --- /dev/null +++ b/docs/source/docs/programming/index.md @@ -0,0 +1,11 @@ +--- +orphan: true +--- + +# Programming Reference + +```{toctree} +:maxdepth: 1 + +photonlib/index +``` diff --git a/docs/source/docs/programming/index.rst b/docs/source/docs/programming/index.rst deleted file mode 100644 index 38d5fd4f8c..0000000000 --- a/docs/source/docs/programming/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -:orphan: - -Programming Reference -===================== - -.. toctree:: - :maxdepth: 1 - - photonlib/index diff --git a/docs/source/docs/programming/photonlib/adding-vendordep.md b/docs/source/docs/programming/photonlib/adding-vendordep.md new file mode 100644 index 0000000000..5a08d93207 --- /dev/null +++ b/docs/source/docs/programming/photonlib/adding-vendordep.md @@ -0,0 +1,48 @@ +# Installing PhotonLib + +## What is PhotonLib? + +PhotonLib is the C++ and Java vendor dependency that accompanies PhotonVision. We created this vendor dependency to make it easier for teams to retrieve vision data from their integrated vision system. + +PhotonLibPy is a minimal, pure-python implementation of PhotonLib. + +## Online Install - Java/C++ + +Click on the WPI icon on the top right of your VS Code window or hit Ctrl+Shift+P (Cmd+Shift+P on macOS) to bring up the command palette. Type, "Manage Vendor Libraries" and select the "WPILib: Manage Vendor Libraries" option. Then, select the "Install new library (online)" option. + +```{image} images/adding-offline-library.png +``` + +Paste the following URL into the box that pops up: + +`https://maven.photonvision.org/repository/internal/org/photonvision/photonlib-json/1.0/photonlib-json-1.0.json` + +:::{note} +It is recommended to Build Robot Code at least once when connected to the Internet before heading to an area where Internet connectivity is limited (for example, a competition). This ensures that the relevant files are downloaded to your filesystem. +::: + +Refer to [The WPILib docs](https://docs.wpilib.org/en/stable/docs/software/vscode-overview/3rd-party-libraries.html#installing-libraries) for more details on installing vendor libraries. + +## Offline Install - Java/C++ + +Download the latest PhotonLib release from our GitHub releases page (named something like `` photonlib-VERSION.zip` ``), and extract the contents to `$HOME/wpilib/YEAR`. This adds PhotonLib maven artifacts to your local maven repository. PhotonLib will now also appear available in the "install vendor libraries (offline)" menu in WPILib VSCode. Refer to [the WPILib docs](https://docs.wpilib.org/en/stable/docs/software/vscode-overview/3rd-party-libraries.html#installing-libraries) for more details on installing vendor libraries offline. + +## Install - Python + +Add photonlibpy to `pyproject.toml`. 
+ +```toml +# Other pip packages to install +requires = [ + "photonlibpy", +] +``` + +See [The WPILib/RobotPy docs](https://docs.wpilib.org/en/stable/docs/software/python/pyproject_toml.html) for more information on using `pyproject.toml.` + +## Install Specific Version - Java/C++ + +In cases where you want to test a specific version of PhotonLib, make sure you have finished the steps in Online Install - Java/C++ and then manually change the version string in the PhotonLib vendordep json file(at ``/path/to/your/project/vendordep/photonlib.json``) to your desired version. + +```{image} images/photonlib-vendordep-json.png +``` diff --git a/docs/source/docs/programming/photonlib/adding-vendordep.rst b/docs/source/docs/programming/photonlib/adding-vendordep.rst deleted file mode 100644 index 178ad29c53..0000000000 --- a/docs/source/docs/programming/photonlib/adding-vendordep.rst +++ /dev/null @@ -1,37 +0,0 @@ -Installing PhotonLib -==================== - -What is PhotonLib? ------------------- -PhotonLib is the C++ and Java vendor dependency that accompanies PhotonVision. We created this vendor dependency to make it easier for teams to retrieve vision data from their integrated vision system. - -PhotonLibPy is a minimal, pure-python implementation of PhotonLib. - -Online Install - Java/C++ -------------------------- -Click on the WPI icon on the top right of your VS Code window or hit Ctrl+Shift+P (Cmd+Shift+P on macOS) to bring up the command palette. Type, "Manage Vendor Libraries" and select the "WPILib: Manage Vendor Libraries" option. Then, select the "Install new library (online)" option. - -.. image:: images/adding-offline-library.png - -Paste the following URL into the box that pops up: - -``https://maven.photonvision.org/repository/internal/org/photonvision/photonlib-json/1.0/photonlib-json-1.0.json`` - -.. note:: It is recommended to Build Robot Code at least once when connected to the Internet before heading to an area where Internet connectivity is limited (for example, a competition). This ensures that the relevant files are downloaded to your filesystem. - -Offline Install - Java/C++ --------------------------- -This installation option is currently a work-in-progress. For now, we recommend users use the online installation method. - -Install - Python ----------------- -Add photonlibpy to `pyproject.toml`. - -.. code-block:: toml - - # Other pip packages to install - requires = [ - "photonlibpy", - ] - -See `The WPILib/RobotPy docs `_ for more information on using `pyproject.toml.` diff --git a/docs/source/docs/programming/photonlib/controlling-led.md b/docs/source/docs/programming/photonlib/controlling-led.md new file mode 100644 index 0000000000..7bd6fd0c32 --- /dev/null +++ b/docs/source/docs/programming/photonlib/controlling-led.md @@ -0,0 +1,20 @@ +# Controlling LEDs + +You can control the vision LEDs of supported hardware via PhotonLib using the `setLED()` method on a `PhotonCamera` instance. In Java and C++, an `VisionLEDMode` enum class is provided to choose values from. These values include, `kOff`, `kOn`, `kBlink`, and `kDefault`. `kDefault` uses the default LED value from the selected pipeline. + +```{eval-rst} +.. tab-set-code:: + .. code-block:: Java + + // Blink the LEDs. + camera.setLED(VisionLEDMode.kBlink); + + .. code-block:: C++ + + // Blink the LEDs. + camera.SetLED(photonlib::VisionLEDMode::kBlink); + + .. code-block:: Python + + # Coming Soon! 
+``` diff --git a/docs/source/docs/programming/photonlib/controlling-led.rst b/docs/source/docs/programming/photonlib/controlling-led.rst deleted file mode 100644 index b4a018670f..0000000000 --- a/docs/source/docs/programming/photonlib/controlling-led.rst +++ /dev/null @@ -1,14 +0,0 @@ -Controlling LEDs -================= -You can control the vision LEDs of supported hardware via PhotonLib using the ``setLED()`` method on a ``PhotonCamera`` instance. In Java and C++, an ``VisionLEDMode`` enum class is provided to choose values from. These values include, ``kOff``, ``kOn``, ``kBlink``, and ``kDefault``. ``kDefault`` uses the default LED value from the selected pipeline. - -.. tab-set-code:: - .. code-block:: java - - // Blink the LEDs. - camera.setLED(VisionLEDMode.kBlink); - - .. code-block:: c++ - - // Blink the LEDs. - camera.SetLED(photonlib::VisionLEDMode::kBlink); diff --git a/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.md b/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.md new file mode 100644 index 0000000000..9a531dde9c --- /dev/null +++ b/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.md @@ -0,0 +1,72 @@ +# Driver Mode and Pipeline Index/Latency + +After {ref}`creating a PhotonCamera `, one can toggle Driver Mode and change the Pipeline Index of the vision program from robot code. + +## Toggle Driver Mode + +You can use the `setDriverMode()`/`SetDriverMode()` (Java and C++ respectively) to toggle driver mode from your robot program. Driver mode is an unfiltered / normal view of the camera to be used while driving the robot. + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // Set driver mode to on. + camera.setDriverMode(true); + + .. code-block:: C++ + + // Set driver mode to on. + camera.SetDriverMode(true); + + .. code-block:: Python + + # Coming Soon! +``` + +## Setting the Pipeline Index + +You can use the `setPipelineIndex()`/`SetPipelineIndex()` (Java and C++ respectively) to dynamically change the vision pipeline from your robot program. + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // Change pipeline to 2 + camera.setPipelineIndex(2); + + .. code-block:: C++ + + // Change pipeline to 2 + camera.SetPipelineIndex(2); + + .. code-block:: Python + + # Coming Soon! +``` + +## Getting the Pipeline Latency + +You can also get the pipeline latency from a pipeline result using the `getLatencyMillis()`/`GetLatency()` (Java and C++ respectively) methods on a `PhotonPipelineResult`. + +```{eval-rst} +.. tab-set-code:: + .. code-block:: Java + + // Get the pipeline latency. + double latencySeconds = result.getLatencyMillis() / 1000.0; + + .. code-block:: C++ + + // Get the pipeline latency. + units::second_t latency = result.GetLatency(); + + .. code-block:: Python + + # Coming Soon! +``` + +:::{note} +The C++ version of PhotonLib returns the latency in a unit container. For more information on the Units library, see [here](https://docs.wpilib.org/en/stable/docs/software/basic-programming/cpp-units.html). 
+::: diff --git a/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.rst b/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.rst deleted file mode 100644 index 14c158d5d2..0000000000 --- a/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.rst +++ /dev/null @@ -1,53 +0,0 @@ -Driver Mode and Pipeline Index/Latency -====================================== - -After :ref:`creating a PhotonCamera `, one can toggle Driver Mode and change the Pipeline Index of the vision program from robot code. - -Toggle Driver Mode ------------------- -You can use the ``setDriverMode()``/``SetDriverMode()`` (Java and C++ respectively) to toggle driver mode from your robot program. Driver mode is an unfiltered / normal view of the camera to be used while driving the robot. - -.. tab-set-code:: - - .. code-block:: java - - // Set driver mode to on. - camera.setDriverMode(true); - - .. code-block:: C++ - - // Set driver mode to on. - camera.SetDriverMode(true); - -Setting the Pipeline Index --------------------------- -You can use the ``setPipelineIndex()``/``SetPipelineIndex()`` (Java and C++ respectively) to dynamically change the vision pipeline from your robot program. - -.. tab-set-code:: - - .. code-block:: java - - // Change pipeline to 2 - camera.setPipelineIndex(2); - - .. code-block:: C++ - - // Change pipeline to 2 - camera.SetPipelineIndex(2); - -Getting the Pipeline Latency ----------------------------- -You can also get the pipeline latency from a pipeline result using the ``getLatencyMillis()``/``GetLatency()`` (Java and C++ respectively) methods on a ``PhotonPipelineResult``. - -.. tab-set-code:: - .. code-block:: java - - // Get the pipeline latency. - double latencySeconds = result.getLatencyMillis() / 1000.0; - - .. code-block:: c++ - - // Get the pipeline latency. - units::second_t latency = result.GetLatency(); - -.. note:: The C++ version of PhotonLib returns the latency in a unit container. For more information on the Units library, see `here `_. diff --git a/docs/source/docs/programming/photonlib/getting-target-data.md b/docs/source/docs/programming/photonlib/getting-target-data.md new file mode 100644 index 0000000000..1d14088cf4 --- /dev/null +++ b/docs/source/docs/programming/photonlib/getting-target-data.md @@ -0,0 +1,257 @@ +# Getting Target Data + +## Constructing a PhotonCamera + +### What is a PhotonCamera? + +`PhotonCamera` is a class in PhotonLib that allows a user to interact with one camera that is connected to hardware that is running PhotonVision. Through this class, users can retrieve yaw, pitch, roll, robot-relative pose, latency, and a wealth of other information. + +The `PhotonCamera` class has two constructors: one that takes a `NetworkTable` and another that takes in the name of the network table that PhotonVision is broadcasting information over. For ease of use, it is recommended to use the latter. The name of the NetworkTable (for the string constructor) should be the same as the camera's nickname (from the PhotonVision UI). + +```{eval-rst} +.. tab-set-code:: + + + .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-java-examples/src/main/java/org/photonlib/examples/aimattarget/Robot.java + :language: java + :lines: 51-52 + + .. rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-cpp-examples/src/main/cpp/examples/aimattarget/include/Robot.h + :language: c++ + :lines: 42-43 + + .. 
code-block:: Python + + # Change this to match the name of your camera as shown in the web ui + self.camera = PhotonCamera("your_camera_name_here") + +``` + +:::{warning} +Teams must have unique names for all of their cameras regardless of which coprocessor they are attached to. +::: + +## Getting the Pipeline Result + +### What is a Photon Pipeline Result? + +A `PhotonPipelineResult` is a container that contains all information about currently detected targets from a `PhotonCamera`. You can retrieve the latest pipeline result using the PhotonCamera instance. + +Use the `getLatestResult()`/`GetLatestResult()` (Java and C++ respectively) to obtain the latest pipeline result. An advantage of using this method is that it returns a container with information that is guaranteed to be from the same timestamp. This is important if you are using this data for latency compensation or in an estimator. + +```{eval-rst} +.. tab-set-code:: + + + .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-java-examples/src/main/java/org/photonlib/examples/aimattarget/Robot.java + :language: java + :lines: 79-80 + + .. rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-cpp-examples/src/main/cpp/examples/aimattarget/cpp/Robot.cpp + :language: c++ + :lines: 35-36 + + .. code-block:: Python + + # Query the latest result from PhotonVision + result = self.camera.getLatestResult() + + +``` + +:::{note} +Unlike other vision software solutions, using the latest result guarantees that all information is from the same timestamp. This is achievable because the PhotonVision backend sends a byte-packed string of data which is then deserialized by PhotonLib to get target data. For more information, check out the [PhotonLib source code](https://github.com/PhotonVision/photonvision/tree/master/photon-lib). +::: + +## Checking for Existence of Targets + +Each pipeline result has a `hasTargets()`/`HasTargets()` (Java and C++ respectively) method to inform the user as to whether the result contains any targets. + +```{eval-rst} +.. tab-set-code:: + .. code-block:: Java + + // Check if the latest result has any targets. + boolean hasTargets = result.hasTargets(); + + .. code-block:: C++ + + // Check if the latest result has any targets. + bool hasTargets = result.HasTargets(); + + .. code-block:: Python + + # Check if the latest result has any targets. + hasTargets = result.hasTargets() +``` + +:::{warning} +In Java/C++, You must *always* check if the result has a target via `hasTargets()`/`HasTargets()` before getting targets or else you may get a null pointer exception. Further, you must use the same result in every subsequent call in that loop. +::: + +## Getting a List of Targets + +### What is a Photon Tracked Target? + +A tracked target contains information about each target from a pipeline result. This information includes yaw, pitch, area, and robot relative pose. + +You can get a list of tracked targets using the `getTargets()`/`GetTargets()` (Java and C++ respectively) method from a pipeline result. + +```{eval-rst} +.. tab-set-code:: + .. code-block:: Java + + // Get a list of currently tracked targets. + List targets = result.getTargets(); + + .. code-block:: C++ + + // Get a list of currently tracked targets. + wpi::ArrayRef targets = result.GetTargets(); + + .. code-block:: Python + + # Get a list of currently tracked targets. 
+        targets = result.getTargets()
+```
+
+## Getting the Best Target
+
+You can get the {ref}`best target ` using the `getBestTarget()`/`GetBestTarget()` (Java and C++ respectively) methods from the pipeline result.
+
+```{eval-rst}
+.. tab-set-code::
+    .. code-block:: Java
+
+        // Get the current best target.
+        PhotonTrackedTarget target = result.getBestTarget();
+
+    .. code-block:: C++
+
+        // Get the current best target.
+        photonlib::PhotonTrackedTarget target = result.GetBestTarget();
+
+
+    .. code-block:: Python
+
+        # Coming Soon!
+
+```
+
+## Getting Data From A Target
+
+- double `getYaw()`/`GetYaw()`: The yaw of the target in degrees (positive right).
+- double `getPitch()`/`GetPitch()`: The pitch of the target in degrees (positive up).
+- double `getArea()`/`GetArea()`: The area (how much of the camera feed the bounding box takes up) as a percent (0-100).
+- double `getSkew()`/`GetSkew()`: The skew of the target in degrees (counter-clockwise positive).
+- double\[\] `getCorners()`/`GetCorners()`: The 4 corners of the minimum bounding box rectangle.
+- Transform2d `getCameraToTarget()`/`GetCameraToTarget()`: The camera to target transform. See [2d transform documentation here](https://docs.wpilib.org/en/latest/docs/software/advanced-controls/geometry/transformations.html#transform2d-and-twist2d).
+
+```{eval-rst}
+.. tab-set-code::
+    .. code-block:: Java
+
+        // Get information from target.
+        double yaw = target.getYaw();
+        double pitch = target.getPitch();
+        double area = target.getArea();
+        double skew = target.getSkew();
+        Transform2d pose = target.getCameraToTarget();
+        List<TargetCorner> corners = target.getCorners();
+
+    .. code-block:: C++
+
+        // Get information from target.
+        double yaw = target.GetYaw();
+        double pitch = target.GetPitch();
+        double area = target.GetArea();
+        double skew = target.GetSkew();
+        frc::Transform2d pose = target.GetCameraToTarget();
+        wpi::SmallVector<std::pair<double, double>, 4> corners = target.GetCorners();
+
+    .. code-block:: Python
+
+        # Get information from target.
+        yaw = target.getYaw()
+        pitch = target.getPitch()
+        area = target.getArea()
+        skew = target.getSkew()
+        pose = target.getCameraToTarget()
+        corners = target.getDetectedCorners()
+```
+
+## Getting AprilTag Data From A Target
+
+:::{note}
+All of the data above (**except skew**) is available when using AprilTags.
+:::
+
+- int `getFiducialId()`/`GetFiducialId()`: The ID of the detected fiducial marker.
+- double `getPoseAmbiguity()`/`GetPoseAmbiguity()`: How ambiguous the pose of the target is (see below).
+- Transform3d `getBestCameraToTarget()`/`GetBestCameraToTarget()`: Get the transform that maps camera space (X = forward, Y = left, Z = up) to object/fiducial tag space (X forward, Y left, Z up) with the lowest reprojection error.
+- Transform3d `getAlternateCameraToTarget()`/`GetAlternateCameraToTarget()`: Get the transform that maps camera space (X = forward, Y = left, Z = up) to object/fiducial tag space (X forward, Y left, Z up) with the highest reprojection error.
+
+```{eval-rst}
+.. tab-set-code::
+    .. code-block:: Java
+
+        // Get information from target.
+        int targetID = target.getFiducialId();
+        double poseAmbiguity = target.getPoseAmbiguity();
+        Transform3d bestCameraToTarget = target.getBestCameraToTarget();
+        Transform3d alternateCameraToTarget = target.getAlternateCameraToTarget();
+
+    .. code-block:: C++
+
+        // Get information from target.
+ int targetID = target.GetFiducialId(); + double poseAmbiguity = target.GetPoseAmbiguity(); + frc::Transform3d bestCameraToTarget = target.getBestCameraToTarget(); + frc::Transform3d alternateCameraToTarget = target.getAlternateCameraToTarget(); + + .. code-block:: Python + + # Get information from target. + targetID = target.getFiducialId() + poseAmbiguity = target.getPoseAmbiguity() + bestCameraToTarget = target.getBestCameraToTarget() + alternateCameraToTarget = target.getAlternateCameraToTarget() +``` + +## Saving Pictures to File + +A `PhotonCamera` can save still images from the input or output video streams to file. This is useful for debugging what a camera is seeing while on the field and confirming targets are being identified properly. + +Images are stored within the PhotonVision configuration directory. Running the "Export" operation in the settings tab will download a .zip file which contains the image captures. + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // Capture pre-process camera stream image + camera.takeInputSnapshot(); + + // Capture post-process camera stream image + camera.takeOutputSnapshot(); + + .. code-block:: C++ + + // Capture pre-process camera stream image + camera.TakeInputSnapshot(); + + // Capture post-process camera stream image + camera.TakeOutputSnapshot(); + + .. code-block:: Python + + # Capture pre-process camera stream image + camera.takeInputSnapshot() + + # Capture post-process camera stream image + camera.takeOutputSnapshot() +``` + +:::{note} +Saving images to file takes a bit of time and uses up disk space, so doing it frequently is not recommended. In general, the camera will save an image every 500ms. Calling these methods faster will not result in additional images. Consider tying image captures to a button press on the driver controller, or an appropriate point in an autonomous routine. +::: diff --git a/docs/source/docs/programming/photonlib/getting-target-data.rst b/docs/source/docs/programming/photonlib/getting-target-data.rst deleted file mode 100644 index 1e9ddc662a..0000000000 --- a/docs/source/docs/programming/photonlib/getting-target-data.rst +++ /dev/null @@ -1,241 +0,0 @@ -Getting Target Data -=================== - -Constructing a PhotonCamera ---------------------------- - -What is a PhotonCamera? -^^^^^^^^^^^^^^^^^^^^^^^ -``PhotonCamera`` is a class in PhotonLib that allows a user to interact with one camera that is connected to hardware that is running PhotonVision. Through this class, users can retrieve yaw, pitch, roll, robot-relative pose, latency, and a wealth of other information. - - -The ``PhotonCamera`` class has two constructors: one that takes a ``NetworkTable`` and another that takes in the name of the network table that PhotonVision is broadcasting information over. For ease of use, it is recommended to use the latter. The name of the NetworkTable (for the string constructor) should be the same as the camera's nickname (from the PhotonVision UI). - -.. tab-set-code:: - - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-java-examples/src/main/java/org/photonlib/examples/aimattarget/Robot.java - :language: java - :lines: 51-52 - - .. rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-cpp-examples/src/main/cpp/examples/aimattarget/include/Robot.h - :language: c++ - :lines: 42-43 - - .. 
code-block:: python - - # Change this to match the name of your camera as shown in the web ui - self.camera = PhotonCamera("your_camera_name_here") - - -.. warning:: Teams must have unique names for all of their cameras regardless of which coprocessor they are attached to. - -Getting the Pipeline Result ---------------------------- - -What is a Photon Pipeline Result? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A ``PhotonPipelineResult`` is a container that contains all information about currently detected targets from a ``PhotonCamera``. You can retrieve the latest pipeline result using the PhotonCamera instance. - -Use the ``getLatestResult()``/``GetLatestResult()`` (Java and C++ respectively) to obtain the latest pipeline result. An advantage of using this method is that it returns a container with information that is guaranteed to be from the same timestamp. This is important if you are using this data for latency compensation or in an estimator. - -.. tab-set-code:: - - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-java-examples/src/main/java/org/photonlib/examples/aimattarget/Robot.java - :language: java - :lines: 79-80 - - .. rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-cpp-examples/src/main/cpp/examples/aimattarget/cpp/Robot.cpp - :language: c++ - :lines: 35-36 - - .. code-block:: python - - # Query the latest result from PhotonVision - result = self.camera.getLatestResult() - - - -.. note:: Unlike other vision software solutions, using the latest result guarantees that all information is from the same timestamp. This is achievable because the PhotonVision backend sends a byte-packed string of data which is then deserialized by PhotonLib to get target data. For more information, check out the `PhotonLib source code `_. - - - -Checking for Existence of Targets ---------------------------------- -Each pipeline result has a ``hasTargets()``/``HasTargets()`` (Java and C++ respectively) method to inform the user as to whether the result contains any targets. - -.. tab-set-code:: - .. code-block:: java - - // Check if the latest result has any targets. - boolean hasTargets = result.hasTargets(); - - .. code-block:: c++ - - // Check if the latest result has any targets. - bool hasTargets = result.HasTargets(); - - .. code-block:: python - - # Check if the latest result has any targets. - hasTargets = result.hasTargets() - -.. warning:: In Java/C++, You must *always* check if the result has a target via ``hasTargets()``/``HasTargets()`` before getting targets or else you may get a null pointer exception. Further, you must use the same result in every subsequent call in that loop. - - -Getting a List of Targets -------------------------- - -What is a Photon Tracked Target? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A tracked target contains information about each target from a pipeline result. This information includes yaw, pitch, area, and robot relative pose. - - -You can get a list of tracked targets using the ``getTargets()``/``GetTargets()`` (Java and C++ respectively) method from a pipeline result. - -.. tab-set-code:: - .. code-block:: java - - // Get a list of currently tracked targets. - List targets = result.getTargets(); - - .. code-block:: c++ - - // Get a list of currently tracked targets. - wpi::ArrayRef targets = result.GetTargets(); - - .. code-block:: python - - # Get a list of currently tracked targets. 
- targets = result.getTargets() - -Getting the Best Target ------------------------ -You can get the :ref:`best target ` using ``getBestTarget()``/``GetBestTarget()`` (Java and C++ respectively) method from the pipeline result. - -.. tab-set-code:: - .. code-block:: java - - // Get the current best target. - PhotonTrackedTarget target = result.getBestTarget(); - - .. code-block:: c++ - - // Get the current best target. - photonlib::PhotonTrackedTarget target = result.GetBestTarget(); - - - .. code-block:: python - - # TODO - Not currently supported - - -Getting Data From A Target --------------------------- -* double ``getYaw()``/``GetYaw()``: The yaw of the target in degrees (positive right). -* double ``getPitch()``/``GetPitch()``: The pitch of the target in degrees (positive up). -* double ``getArea()``/``GetArea()``: The area (how much of the camera feed the bounding box takes up) as a percent (0-100). -* double ``getSkew()``/``GetSkew()``: The skew of the target in degrees (counter-clockwise positive). -* double[] ``getCorners()``/``GetCorners()``: The 4 corners of the minimum bounding box rectangle. -* Transform2d ``getCameraToTarget()``/``GetCameraToTarget()``: The camera to target transform. See `2d transform documentation here `_. - - -.. tab-set-code:: - .. code-block:: java - - // Get information from target. - double yaw = target.getYaw(); - double pitch = target.getPitch(); - double area = target.getArea(); - double skew = target.getSkew(); - Transform2d pose = target.getCameraToTarget(); - List corners = target.getCorners(); - - .. code-block:: c++ - - // Get information from target. - double yaw = target.GetYaw(); - double pitch = target.GetPitch(); - double area = target.GetArea(); - double skew = target.GetSkew(); - frc::Transform2d pose = target.GetCameraToTarget(); - wpi::SmallVector, 4> corners = target.GetCorners(); - - .. code-block:: python - - # Get information from target. - yaw = target.getYaw() - pitch = target.getPitch() - area = target.getArea() - skew = target.getSkew() - pose = target.getCameraToTarget() - corners = target.getDetectedCorners() - -Getting AprilTag Data From A Target ------------------------------------ -.. note:: All of the data above (**except skew**) is available when using AprilTags. - -* int ``getFiducialId()``/``GetFiducialId()``: The ID of the detected fiducial marker. -* double ``getPoseAmbiguity()``/``GetPoseAmbiguity()``: How ambiguous the pose of the target is (see below). -* Transform3d ``getBestCameraToTarget()``/``GetBestCameraToTarget()``: Get the transform that maps camera space (X = forward, Y = left, Z = up) to object/fiducial tag space (X forward, Y left, Z up) with the lowest reprojection error. -* Transform3d ``getAlternateCameraToTarget()``/``GetAlternateCameraToTarget()``: Get the transform that maps camera space (X = forward, Y = left, Z = up) to object/fiducial tag space (X forward, Y left, Z up) with the highest reprojection error. - -.. tab-set-code:: - .. code-block:: java - - // Get information from target. - int targetID = target.getFiducialId(); - double poseAmbiguity = target.getPoseAmbiguity(); - Transform3d bestCameraToTarget = target.getBestCameraToTarget(); - Transform3d alternateCameraToTarget = target.getAlternateCameraToTarget(); - - .. code-block:: c++ - - // Get information from target. 
- int targetID = target.GetFiducialId(); - double poseAmbiguity = target.GetPoseAmbiguity(); - frc::Transform3d bestCameraToTarget = target.getBestCameraToTarget(); - frc::Transform3d alternateCameraToTarget = target.getAlternateCameraToTarget(); - - .. code-block:: python - - # Get information from target. - targetID = target.getFiducialId() - poseAmbiguity = target.getPoseAmbiguity() - bestCameraToTarget = target.getBestCameraToTarget() - alternateCameraToTarget = target.getAlternateCameraToTarget() - -Saving Pictures to File ------------------------ -A ``PhotonCamera`` can save still images from the input or output video streams to file. This is useful for debugging what a camera is seeing while on the field and confirming targets are being identified properly. - -Images are stored within the PhotonVision configuration directory. Running the "Export" operation in the settings tab will download a .zip file which contains the image captures. - -.. tab-set-code:: - - .. code-block:: java - - // Capture pre-process camera stream image - camera.takeInputSnapshot(); - - // Capture post-process camera stream image - camera.takeOutputSnapshot(); - - .. code-block:: C++ - - // Capture pre-process camera stream image - camera.TakeInputSnapshot(); - - // Capture post-process camera stream image - camera.TakeOutputSnapshot(); - - .. code-block:: python - - # Capture pre-process camera stream image - camera.takeInputSnapshot() - - # Capture post-process camera stream image - camera.takeOutputSnapshot() - -.. note:: Saving images to file takes a bit of time and uses up disk space, so doing it frequently is not recommended. In general, the camera will save an image every 500ms. Calling these methods faster will not result in additional images. Consider tying image captures to a button press on the driver controller, or an appropriate point in an autonomous routine. diff --git a/docs/source/docs/programming/photonlib/images/photonlib-vendordep-json.png b/docs/source/docs/programming/photonlib/images/photonlib-vendordep-json.png new file mode 100644 index 0000000000..6362174ca4 Binary files /dev/null and b/docs/source/docs/programming/photonlib/images/photonlib-vendordep-json.png differ diff --git a/docs/source/docs/programming/photonlib/index.md b/docs/source/docs/programming/photonlib/index.md new file mode 100644 index 0000000000..0e38862d5f --- /dev/null +++ b/docs/source/docs/programming/photonlib/index.md @@ -0,0 +1,12 @@ +# PhotonLib: Robot Code Interface + +```{toctree} +:maxdepth: 1 + +adding-vendordep +getting-target-data +using-target-data +robot-pose-estimator +driver-mode-pipeline-index +controlling-led +``` diff --git a/docs/source/docs/programming/photonlib/index.rst b/docs/source/docs/programming/photonlib/index.rst deleted file mode 100644 index d1d1c381e3..0000000000 --- a/docs/source/docs/programming/photonlib/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -PhotonLib: Robot Code Interface -=============================== - -.. 
toctree::
-   :maxdepth: 1
-
-   adding-vendordep
-   getting-target-data
-   using-target-data
-   robot-pose-estimator
-   driver-mode-pipeline-index
-   controlling-led
diff --git a/docs/source/docs/programming/photonlib/robot-pose-estimator.md b/docs/source/docs/programming/photonlib/robot-pose-estimator.md
new file mode 100644
index 0000000000..283d86bb5d
--- /dev/null
+++ b/docs/source/docs/programming/photonlib/robot-pose-estimator.md
@@ -0,0 +1,142 @@
+# AprilTags and PhotonPoseEstimator
+
+:::{note}
+For more information on the methods used to get AprilTag data, look {ref}`here `.
+:::
+
+PhotonLib includes a `PhotonPoseEstimator` class, which allows you to combine the pose data from all tags in view in order to get a field relative pose. The `PhotonPoseEstimator` class works with one camera per object instance, but more than one instance may be created.
+
+## Creating an `AprilTagFieldLayout`
+
+`AprilTagFieldLayout` is used to represent a layout of AprilTags within a space (field, shop at home, classroom, etc.). WPILib provides a JSON that describes the layout of AprilTags on the field which you can then use in the AprilTagFieldLayout constructor. You can also specify a custom layout.
+
+The API documentation can be found here: [Java](https://github.wpilib.org/allwpilib/docs/release/java/edu/wpi/first/apriltag/AprilTagFieldLayout.html) and [C++](https://github.wpilib.org/allwpilib/docs/release/cpp/classfrc_1_1_april_tag_field_layout.html).
+
+```{eval-rst}
+.. tab-set-code::
+   .. code-block:: Java
+
+      // The field from AprilTagFields will be different depending on the game.
+      AprilTagFieldLayout aprilTagFieldLayout = AprilTagFields.k2024Crescendo.loadAprilTagLayoutField();
+
+   .. code-block:: C++
+
+      // The parameter for LoadAprilTagLayoutField will be different depending on the game.
+      frc::AprilTagFieldLayout aprilTagFieldLayout = frc::LoadAprilTagLayoutField(frc::AprilTagField::k2024Crescendo);
+
+   .. code-block:: Python
+
+      # Coming Soon!
+
+```
+
+## Creating a `PhotonPoseEstimator`
+
+The PhotonPoseEstimator has a constructor that takes an `AprilTagFieldLayout` (see above), `PoseStrategy`, `PhotonCamera`, and `Transform3d`. `PoseStrategy` has six possible values:
+
+- MULTI_TAG_PNP_ON_COPROCESSOR
+  - Calculates a new robot position estimate by combining all visible tag corners. Recommended for all teams as it will be the most accurate.
+  - Must configure the AprilTagFieldLayout properly in the UI, please see {ref}`here ` for more information.
+- LOWEST_AMBIGUITY
+  - Choose the Pose with the lowest ambiguity.
+- CLOSEST_TO_CAMERA_HEIGHT
+  - Choose the Pose which is closest to the camera height.
+- CLOSEST_TO_REFERENCE_POSE
+  - Choose the Pose which is closest to the pose from setReferencePose().
+- CLOSEST_TO_LAST_POSE
+  - Choose the Pose which is closest to the last pose calculated.
+- AVERAGE_BEST_TARGETS
+  - Choose the Pose which is the average of all the poses from each tag.
+
+```{eval-rst}
+.. tab-set-code::
+   .. code-block:: Java
+
+      //Forward Camera
+      cam = new PhotonCamera("testCamera");
+      Transform3d robotToCam = new Transform3d(new Translation3d(0.5, 0.0, 0.5), new Rotation3d(0,0,0)); //Cam mounted facing forward, half a meter forward of center, half a meter up from center.
+
+      // Construct PhotonPoseEstimator
+      PhotonPoseEstimator photonPoseEstimator = new PhotonPoseEstimator(aprilTagFieldLayout, PoseStrategy.CLOSEST_TO_REFERENCE_POSE, cam, robotToCam);
+
+   .. code-block:: C++
+
+      // Forward Camera
+      std::shared_ptr<photonlib::PhotonCamera> cameraOne =
+          std::make_shared<photonlib::PhotonCamera>("testCamera");
+      // Camera is mounted facing forward, half a meter forward of center, half a
+      // meter up from center.
+      frc::Transform3d robotToCam =
+          frc::Transform3d(frc::Translation3d(0.5_m, 0_m, 0.5_m),
+                           frc::Rotation3d(0_rad, 0_rad, 0_rad));
+
+      // ... Add other cameras here
+
+      // Assemble the list of cameras & mount locations
+      std::vector<
+          std::pair<std::shared_ptr<photonlib::PhotonCamera>, frc::Transform3d>>
+          cameras;
+      cameras.push_back(std::make_pair(cameraOne, robotToCam));
+
+      photonlib::RobotPoseEstimator estimator(
+          aprilTags, photonlib::CLOSEST_TO_REFERENCE_POSE, cameras);
+
+   .. code-block:: Python
+
+       kRobotToCam = wpimath.geometry.Transform3d(
+           wpimath.geometry.Translation3d(0.5, 0.0, 0.5),
+           wpimath.geometry.Rotation3d.fromDegrees(0.0, -30.0, 0.0),
+       )
+
+       self.cam = PhotonCamera("YOUR CAMERA NAME")
+
+       self.camPoseEst = PhotonPoseEstimator(
+           loadAprilTagLayoutField(AprilTagField.k2024Crescendo),
+           PoseStrategy.CLOSEST_TO_REFERENCE_POSE,
+           self.cam,
+           kRobotToCam
+       )
+```
+
+## Using a `PhotonPoseEstimator`
+
+Calling `update()` on your `PhotonPoseEstimator` will return an `EstimatedRobotPose`, which includes a `Pose3d` of the latest estimated pose (using the selected strategy) along with a `double` of the timestamp when the robot pose was estimated. You should be updating your [drivetrain pose estimator](https://docs.wpilib.org/en/latest/docs/software/advanced-controls/state-space/state-space-pose-estimators.html) with the result from the `PhotonPoseEstimator` every loop using `addVisionMeasurement()`.
+
+```{eval-rst}
+.. tab-set-code::
+   .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/357d8a518a93f7a1f8084a79449249e613b605a7/photonlib-java-examples/apriltagExample/src/main/java/frc/robot/PhotonCameraWrapper.java
+      :language: java
+      :lines: 85-88
+
+   .. code-block:: C++
+
+      std::pair<frc::Pose2d, units::millisecond_t> getEstimatedGlobalPose(
+          frc::Pose3d prevEstimatedRobotPose) {
+        robotPoseEstimator.SetReferencePose(prevEstimatedRobotPose);
+        units::millisecond_t currentTime = frc::Timer::GetFPGATimestamp();
+        auto result = robotPoseEstimator.Update();
+        if (result.second) {
+          return std::make_pair<>(result.first.ToPose2d(),
+                                  currentTime - result.second);
+        } else {
+          return std::make_pair(frc::Pose2d(), 0_ms);
+        }
+      }
+
+   .. code-block:: Python
+
+
+
+```
+
+You should be updating your [drivetrain pose estimator](https://docs.wpilib.org/en/latest/docs/software/advanced-controls/state-space/state-space-pose-estimators.html) with the result from the `PhotonPoseEstimator` every loop using `addVisionMeasurement()`. TODO: add example note
+
+## Additional `PhotonPoseEstimator` Methods
+
+### `setReferencePose(Pose3d referencePose)`
+
+Updates the stored reference pose when using the CLOSEST_TO_REFERENCE_POSE strategy.
+
+### `setLastPose(Pose3d lastPose)`
+
+Update the stored last pose. Useful for setting the initial estimate when using the CLOSEST_TO_LAST_POSE strategy.
diff --git a/docs/source/docs/programming/photonlib/robot-pose-estimator.rst b/docs/source/docs/programming/photonlib/robot-pose-estimator.rst
deleted file mode 100644
index c367c926f8..0000000000
--- a/docs/source/docs/programming/photonlib/robot-pose-estimator.rst
+++ /dev/null
@@ -1,113 +0,0 @@
-AprilTags and PhotonPoseEstimator
-=================================
-
-.. note:: For more information on how to methods to get AprilTag data, look :ref:`here `. 
- -PhotonLib includes a ``PhotonPoseEstimator`` class, which allows you to combine the pose data from all tags in view in order to get a field relative pose. The ``PhotonPoseEstimator`` class works with one camera per object instance, but more than one instance may be created. - -Creating an ``AprilTagFieldLayout`` ------------------------------------ -``AprilTagFieldLayout`` is used to represent a layout of AprilTags within a space (field, shop at home, classroom, etc.). WPILib provides a JSON that describes the layout of AprilTags on the field which you can then use in the AprilTagFieldLayout constructor. You can also specify a custom layout. - -The API documentation can be found in here: `Java `_ and `C++ `_. - -.. tab-set-code:: - .. code-block:: java - - // The field from AprilTagFields will be different depending on the game. - AprilTagFieldLayout aprilTagFieldLayout = AprilTagFields.k2024Crescendo.loadAprilTagLayoutField(); - - .. code-block:: c++ - - // The parameter for LoadAPrilTagLayoutField will be different depending on the game. - frc::AprilTagFieldLayout aprilTagFieldLayout = frc::LoadAprilTagLayoutField(frc::AprilTagField::k2024Crescendo); - - -Creating a ``PhotonPoseEstimator`` ----------------------------------- -The PhotonPoseEstimator has a constructor that takes an ``AprilTagFieldLayout`` (see above), ``PoseStrategy``, ``PhotonCamera``, and ``Transform3d``. ``PoseStrategy`` has six possible values: - -* MULTI_TAG_PNP_ON_COPROCESSOR - * Calculates a new robot position estimate by combining all visible tag corners. Recommended for all teams as it will be the most accurate. - * Must configure the AprilTagFieldLayout properly in the UI, please see :ref:`here ` for more information. -* LOWEST_AMBIGUITY - * Choose the Pose with the lowest ambiguity. -* CLOSEST_TO_CAMERA_HEIGHT - * Choose the Pose which is closest to the camera height. -* CLOSEST_TO_REFERENCE_POSE - * Choose the Pose which is closest to the pose from setReferencePose(). -* CLOSEST_TO_LAST_POSE - * Choose the Pose which is closest to the last pose calculated. -* AVERAGE_BEST_TARGETS - * Choose the Pose which is the average of all the poses from each tag. - -.. tab-set-code:: - .. code-block:: java - - //Forward Camera - cam = new PhotonCamera("testCamera"); - Transform3d robotToCam = new Transform3d(new Translation3d(0.5, 0.0, 0.5), new Rotation3d(0,0,0)); //Cam mounted facing forward, half a meter forward of center, half a meter up from center. - - // Construct PhotonPoseEstimator - PhotonPoseEstimator photonPoseEstimator = new PhotonPoseEstimator(aprilTagFieldLayout, PoseStrategy.CLOSEST_TO_REFERENCE_POSE, cam, robotToCam); - - .. code-block:: c++ - - // Forward Camera - std::shared_ptr cameraOne = - std::make_shared("testCamera"); - // Camera is mounted facing forward, half a meter forward of center, half a - // meter up from center. - frc::Transform3d robotToCam = - frc::Transform3d(frc::Translation3d(0.5_m, 0_m, 0.5_m), - frc::Rotation3d(0_rad, 0_rad, 0_rad)); - - // ... 
Add other cameras here - - // Assemble the list of cameras & mount locations - std::vector< - std::pair, frc::Transform3d>> - cameras; - cameras.push_back(std::make_pair(cameraOne, robotToCam)); - - photonlib::RobotPoseEstimator estimator( - aprilTags, photonlib::CLOSEST_TO_REFERENCE_POSE, cameras); - -Using a ``PhotonPoseEstimator`` -------------------------------- -Calling ``update()`` on your ``PhotonPoseEstimator`` will return an ``EstimatedRobotPose``, which includes a ``Pose3d`` of the latest estimated pose (using the selected strategy) along with a ``double`` of the timestamp when the robot pose was estimated. You should be updating your `drivetrain pose estimator `_ with the result from the ``PhotonPoseEstimator`` every loop using ``addVisionMeasurement()``. - -.. tab-set-code:: - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/357d8a518a93f7a1f8084a79449249e613b605a7/photonlib-java-examples/apriltagExample/src/main/java/frc/robot/PhotonCameraWrapper.java - :language: java - :lines: 85-88 - - .. code-block:: c++ - - std::pair getEstimatedGlobalPose( - frc::Pose3d prevEstimatedRobotPose) { - robotPoseEstimator.SetReferencePose(prevEstimatedRobotPose); - units::millisecond_t currentTime = frc::Timer::GetFPGATimestamp(); - auto result = robotPoseEstimator.Update(); - if (result.second) { - return std::make_pair<>(result.first.ToPose2d(), - currentTime - result.second); - } else { - return std::make_pair(frc::Pose2d(), 0_ms); - } - } - -You should be updating your `drivetrain pose estimator `_ with the result from the ``RobotPoseEstimator`` every loop using ``addVisionMeasurement()``. TODO: add example note - -Additional ``PhotonPoseEstimator`` Methods ------------------------------------------- - -``setReferencePose(Pose3d referencePose)`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Updates the stored reference pose when using the CLOSEST_TO_REFERENCE_POSE strategy. - -``setLastPose(Pose3d lastPose)`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Update the stored last pose. Useful for setting the initial estimate when using the CLOSEST_TO_LAST_POSE strategy. diff --git a/docs/source/docs/programming/photonlib/using-target-data.md b/docs/source/docs/programming/photonlib/using-target-data.md new file mode 100644 index 0000000000..ad4c516cf4 --- /dev/null +++ b/docs/source/docs/programming/photonlib/using-target-data.md @@ -0,0 +1,136 @@ +# Using Target Data + +A `PhotonUtils` class with helpful common calculations is included within `PhotonLib` to aid teams in using AprilTag data in order to get positional information on the field. This class contains two methods, `calculateDistanceToTargetMeters()`/`CalculateDistanceToTarget()` and `estimateTargetTranslation2d()`/`EstimateTargetTranslation()` (Java and C++ respectively). + +## Estimating Field Relative Pose with AprilTags + +`estimateFieldToRobotAprilTag(Transform3d cameraToTarget, Pose3d fieldRelativeTagPose, Transform3d cameraToRobot)` returns your robot's `Pose3d` on the field using the pose of the AprilTag relative to the camera, pose of the AprilTag relative to the field, and the transform from the camera to the origin of the robot. + +```{eval-rst} +.. tab-set-code:: + .. code-block:: Java + + // Calculate robot's field relative pose + Pose3d robotPose = PhotonUtils.estimateFieldToRobotAprilTag(target.getBestCameraToTarget(), aprilTagFieldLayout.getTagPose(target.getFiducialId()), cameraToRobot); + .. code-block:: C++ + + //TODO + + .. code-block:: Python + + # Coming Soon! 
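+      # (Illustrative sketch only, not a released PhotonLib API: the same pose can be
+      # composed by hand with wpimath geometry classes. The cameraToRobot transform and
+      # the target/aprilTagFieldLayout objects below are assumed to exist as in the Java tab.)
+      # tagPose = aprilTagFieldLayout.getTagPose(target.getFiducialId())
+      # if tagPose is not None:
+      #     cameraPose = tagPose.transformBy(target.getBestCameraToTarget().inverse())
+      #     robotPose = cameraPose.transformBy(cameraToRobot)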
+```
+
+## Estimating Field Relative Pose (Traditional)
+
+You can get your robot's `Pose2d` on the field using various camera data, target yaw, gyro angle, target pose, and camera position. This method estimates the target's relative position using `estimateCameraToTargetTranslation` (which uses pitch and yaw to estimate range and heading), and the robot's gyro to estimate the rotation of the target.
+
+```{eval-rst}
+.. tab-set-code::
+   .. code-block:: Java
+
+      // Calculate robot's field relative pose
+      Pose2d robotPose = PhotonUtils.estimateFieldToRobot(
+        kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, Rotation2d.fromDegrees(-target.getYaw()), gyro.getRotation2d(), targetPose, cameraToRobot);
+
+   .. code-block:: C++
+
+      // Calculate robot's field relative pose
+      frc::Pose2d robotPose = photonlib::PhotonUtils::EstimateFieldToRobot(
+        kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, frc::Rotation2d(units::degree_t(-target.GetYaw())), gyro.GetRotation2d(), targetPose, cameraToRobot);
+
+   .. code-block:: Python
+
+      # Coming Soon!
+
+```
+
+## Calculating Distance to Target
+
+If your camera is at a fixed height on your robot and the height of the target is fixed, you can calculate the distance to the target based on your camera's pitch and the pitch to the target.
+
+```{eval-rst}
+.. tab-set-code::
+
+   .. code-block:: Java
+
+      // TODO
+
+   .. code-block:: C++
+
+      // TODO
+
+   .. code-block:: Python
+
+      # Coming Soon!
+
+```
+
+:::{note}
+The C++ version of PhotonLib uses the Units library. For more information, see [here](https://docs.wpilib.org/en/stable/docs/software/basic-programming/cpp-units.html).
+:::
+
+## Calculating Distance Between Two Poses
+
+`getDistanceToPose(Pose2d robotPose, Pose2d targetPose)` allows you to calculate the distance between two poses. This is useful when using AprilTags, given that there may not be an AprilTag directly on the target.
+
+```{eval-rst}
+.. tab-set-code::
+   .. code-block:: Java
+
+      double distanceToTarget = PhotonUtils.getDistanceToPose(robotPose, targetPose);
+
+   .. code-block:: C++
+
+      //TODO
+
+   .. code-block:: Python
+
+      # Coming Soon!
+```
+
+## Estimating Camera Translation to Target
+
+You can get a [translation](https://docs.wpilib.org/en/latest/docs/software/advanced-controls/geometry/pose.html#translation) to the target based on the distance to the target (calculated above) and angle to the target (yaw).
+
+```{eval-rst}
+.. tab-set-code::
+   .. code-block:: Java
+
+      // Calculate a translation from the camera to the target.
+      Translation2d translation = PhotonUtils.estimateCameraToTargetTranslation(
+        distanceMeters, Rotation2d.fromDegrees(-target.getYaw()));
+
+   .. code-block:: C++
+
+      // Calculate a translation from the camera to the target.
+      frc::Translation2d translation = photonlib::PhotonUtils::EstimateCameraToTargetTranslation(
+        distance, frc::Rotation2d(units::degree_t(-target.GetYaw())));
+
+   .. code-block:: Python
+
+      # Coming Soon!
+
+```
+
+:::{note}
+We are negating the yaw from the camera from CV (computer vision) conventions to standard mathematical conventions. In standard mathematical conventions, as you turn counter-clockwise, angles become more positive.
+:::
+
+## Getting the Yaw To a Pose
+
+`getYawToPose(Pose2d robotPose, Pose2d targetPose)` returns the `Rotation2d` between your robot and a target. This is useful when turning towards an arbitrary target on the field (ex. the center of the hub in 2022).
+
+```{eval-rst}
+.. tab-set-code::
+   .. 
code-block:: Java + + Rotation2d targetYaw = PhotonUtils.getYawToPose(robotPose, targetPose); + .. code-block:: C++ + + //TODO + + .. code-block:: Python + + # Coming Soon! +``` diff --git a/docs/source/docs/programming/photonlib/using-target-data.rst b/docs/source/docs/programming/photonlib/using-target-data.rst deleted file mode 100644 index 6df0b7fec0..0000000000 --- a/docs/source/docs/programming/photonlib/using-target-data.rst +++ /dev/null @@ -1,97 +0,0 @@ -Using Target Data -================= - -A ``PhotonUtils`` class with helpful common calculations is included within ``PhotonLib`` to aid teams in using target data in order to get positional information on the field. This class contains two methods, ``calculateDistanceToTargetMeters()``/``CalculateDistanceToTarget()`` and ``estimateTargetTranslation2d()``/``EstimateTargetTranslation()`` (Java and C++ respectively). - -Estimating Field Relative Pose with AprilTags ---------------------------------------------- -``estimateFieldToRobotAprilTag(Transform3d cameraToTarget, Pose3d fieldRelativeTagPose, Transform3d cameraToRobot)`` returns your robot's ``Pose3d`` on the field using the pose of the AprilTag relative to the camera, pose of the AprilTag relative to the field, and the transform from the camera to the origin of the robot. - -.. tab-set-code:: - .. code-block:: java - - // Calculate robot's field relative pose - Pose3d robotPose = PhotonUtils.estimateFieldToRobotAprilTag(target.getBestCameraToTarget(), aprilTagFieldLayout.getTagPose(target.getFiducialId()), cameraToRobot); - .. code-block:: c++ - - //TODO - -Estimating Field Relative Pose (Traditional) --------------------------------------------- - -You can get your robot's ``Pose2D`` on the field using various camera data, target yaw, gyro angle, target pose, and camera position. This method estimates the target's relative position using ``estimateCameraToTargetTranslation`` (which uses pitch and yaw to estimate range and heading), and the robot's gyro to estimate the rotation of the target. - -.. tab-set-code:: - .. code-block:: java - - // Calculate robot's field relative pose - Pose2D robotPose = PhotonUtils.estimateFieldToRobot( - kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, Rotation2d.fromDegrees(-target.getYaw()), gyro.getRotation2d(), targetPose, cameraToRobot); - - .. code-block:: c++ - - // Calculate robot's field relative pose - frc::Pose2D robotPose = photonlib::EstimateFieldToRobot( - kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, frc::Rotation2d(units::degree_t(-target.GetYaw())), frc::Rotation2d(units::degree_t(gyro.GetRotation2d)), targetPose, cameraToRobot); - - -Calculating Distance to Target ------------------------------- -If your camera is at a fixed height on your robot and the height of the target is fixed, you can calculate the distance to the target based on your camera's pitch and the pitch to the target. - -.. tab-set-code:: - - - .. rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-java-examples/src/main/java/org/photonlib/examples/getinrange/Robot.java - :language: java - :lines: 78-94 - - .. rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-cpp-examples/src/main/cpp/examples/getinrange/cpp/Robot.cpp - :language: cpp - :lines: 33-46 - -.. note:: The C++ version of PhotonLib uses the Units library. For more information, see `here `_. 
- -Calculating Distance Between Two Poses --------------------------------------- -``getDistanceToPose(Pose2d robotPose, Pose2d targetPose)`` allows you to calculate the distance between two poses. This is useful when using AprilTags, given that there may not be an AprilTag directly on the target. - -.. tab-set-code:: - .. code-block:: java - - double distanceToTarget = PhotonUtils.getDistanceToPose(robotPose, targetPose); - - .. code-block:: c++ - - //TODO - -Estimating Camera Translation to Target ---------------------------------------- -You can get a `translation `_ to the target based on the distance to the target (calculated above) and angle to the target (yaw). - -.. tab-set-code:: - .. code-block:: java - - // Calculate a translation from the camera to the target. - Translation2d translation = PhotonUtils.estimateCameraToTargetTranslation( - distanceMeters, Rotation2d.fromDegrees(-target.getYaw())); - - .. code-block:: c++ - - // Calculate a translation from the camera to the target. - frc::Translation2d translation = photonlib::PhotonUtils::EstimateCameraToTargetTranslationn( - distance, frc::Rotation2d(units::degree_t(-target.GetYaw()))); - -.. note:: We are negating the yaw from the camera from CV (computer vision) conventions to standard mathematical conventions. In standard mathematical conventions, as you turn counter-clockwise, angles become more positive. - -Getting the Yaw To a Pose -------------------------- -``getYawToPose(Pose2d robotPose, Pose2d targetPose)`` returns the ``Rotation2d`` between your robot and a target. This is useful when turning towards an arbitrary target on the field (ex. the center of the hub in 2022). - -.. tab-set-code:: - .. code-block:: java - - Rotation2d targetYaw = PhotonUtils.getYawToPose(robotPose, targetPose); - .. code-block:: c++ - - //TODO diff --git a/docs/source/docs/reflectiveAndShape/3D.rst b/docs/source/docs/reflectiveAndShape/3D.md similarity index 53% rename from docs/source/docs/reflectiveAndShape/3D.rst rename to docs/source/docs/reflectiveAndShape/3D.md index bc820bb8b1..db0a745a48 100644 --- a/docs/source/docs/reflectiveAndShape/3D.rst +++ b/docs/source/docs/reflectiveAndShape/3D.md @@ -1,24 +1,22 @@ -3D Tuning -========= +# 3D Tuning -In 3D mode, the SolvePNP algorithm is used to compute the position and rotation of the target relative to the robot. This requires your :ref:`camera to be calibrated ` which can be done through the cameras tab. +In 3D mode, the SolvePNP algorithm is used to compute the position and rotation of the AprilTag or other target relative to the robot. This requires your {ref}`camera to be calibrated ` which can be done through the cameras tab. The target model dropdown is used to select the target model used to compute target position. This should match the target your camera will be tracking. If solvePNP is working correctly, the target should be displayed as a small rectangle within the "Target Location" minimap. The X/Y/Angle reading will also be displayed in the "Target Info" card. -.. raw:: html +```{raw} html + + +``` - - - -Contour Simplification ----------------------- +## Contour Simplification (Non-Apriltag) 3D mode internally computes a polygon that approximates the target contour being tracked. This polygon is used to detect the extreme corners of the target. The contour simplification slider changes how far from the original contour the approximation is allowed to deviate. Note that the approximate polygon is drawn on the output image for tuning. 
diff --git a/docs/source/docs/reflectiveAndShape/contour-filtering.rst b/docs/source/docs/reflectiveAndShape/contour-filtering.md similarity index 56% rename from docs/source/docs/reflectiveAndShape/contour-filtering.rst rename to docs/source/docs/reflectiveAndShape/contour-filtering.md index 1d4367cd4a..75783ac1cd 100644 --- a/docs/source/docs/reflectiveAndShape/contour-filtering.rst +++ b/docs/source/docs/reflectiveAndShape/contour-filtering.md @@ -1,63 +1,56 @@ -Contour Filtering and Grouping -============================== +# Contour Filtering and Grouping Contours that make it past thresholding are filtered and grouped so that only likely targets remain. -Filtering Options -^^^^^^^^^^^^^^^^^ +## Filtering Options -Reflective ----------- +### Reflective Contours can be filtered by area, width/height ratio, "fullness", and "speckle rejection" percentage. Area filtering adjusts the percentage of overall image area that contours are allowed to occupy. The area of valid contours is shown in the "target info" card on the right. -Ratio adjusts the width to height ratio of allowable contours. For example, a width to height filtering range of [2, 3] would allow targets that are 250 x 100 pixels in size through. +Ratio adjusts the width to height ratio of allowable contours. For example, a width to height filtering range of \[2, 3\] would allow targets that are 250 x 100 pixels in size through. Fullness is a measurement of the ratio between the contour's area and the area of its bounding rectangle. This can be used to reject contours that are for example solid blobs. Finally, speckle rejection is an algorithm that can discard contours whose area are below a certain percentage of the average area of all visible contours. This might be useful in rejecting stray lights or image noise. -.. raw:: html +```{raw} html + +``` - - -Colored Shape -------------- +### Colored Shape The contours tab has new options for specifying the properties of your colored shape. The target shape types are: -* Circle - No edges -* Triangle - 3 edges -* Quadrilateral - 4 edges -* Polygon - Any number of edges +- Circle - No edges +- Triangle - 3 edges +- Quadrilateral - 4 edges +- Polygon - Any number of edges -.. image:: images/triangle.png - :width: 600 - :alt: Dropdown to select the colored shape pipeline type. +```{image} images/triangle.png +:alt: Dropdown to select the colored shape pipeline type. +:width: 600 +``` Only the settings used for the current target shape are available. -* Shape Simplification - This is the only setting available for polygon, triangle, and quadrilateral target shapes. If you are having issues with edges being "noisy" or "unclean", adjust this setting to be higher (>75). This high setting helps prevent imperfections in the edge from being counted as a separate edge. - -* Circle Match Distance - How close the centroid of a contour must be to the center of the circle in order for them to be matched. This value is usually pretty small (<25) as you usually only want to identify circles that are nearly centered in the contour. - -* Radius - Percentage of the frame that the radius of the circle represents. +- Shape Simplification - This is the only setting available for polygon, triangle, and quadrilateral target shapes. If you are having issues with edges being "noisy" or "unclean", adjust this setting to be higher (>75). This high setting helps prevent imperfections in the edge from being counted as a separate edge. 
+- Circle Match Distance - How close the centroid of a contour must be to the center of the circle in order for them to be matched. This value is usually pretty small (\<25) as you usually only want to identify circles that are nearly centered in the contour. +- Radius - Percentage of the frame that the radius of the circle represents. +- Max Canny Threshold - This sets the amount of change between pixels needed to be considered an edge. The smaller it is, the more false circles may be detected. Circles with more points along their ring having high contrast values will be returned first. +- Circle Accuracy - This determines how perfect the circle contour must be in order to be considered a circle. Low values (\<40) are required to detect things that aren't perfect circles. -* Max Canny Threshold - This sets the amount of change between pixels needed to be considered an edge. The smaller it is, the more false circles may be detected. Circles with more points along their ring having high contrast values will be returned first. +```{image} images/pumpkin.png +:alt: Dropdown to select the colored shape pipeline type. +:width: 600 +``` -* Circle Accuracy - This determines how perfect the circle contour must be in order to be considered a circle. Low values (<40) are required to detect things that aren't perfect circles. - -.. image:: images/pumpkin.png - :width: 600 - :alt: Dropdown to select the colored shape pipeline type. - -Contour Grouping and Sorting -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +## Contour Grouping and Sorting These options change how contours are grouped together and sorted. Target grouping can pair adjacent contours, such as the targets found in 2019. Target intersection defines where the targets would intersect if you extended them infinitely, for example, to only group targets tipped "towards" each other in 2019. @@ -71,9 +64,9 @@ Finally, target sort defines how targets are ranked, from "best" to "worst." The - Leftmost - Centermost -.. raw:: html - - +```{raw} html + +``` diff --git a/docs/source/docs/reflectiveAndShape/index.md b/docs/source/docs/reflectiveAndShape/index.md new file mode 100644 index 0000000000..a9e293ffcc --- /dev/null +++ b/docs/source/docs/reflectiveAndShape/index.md @@ -0,0 +1,10 @@ +# Colored Shape & Reflective + +```{toctree} +:maxdepth: 0 +:titlesonly: true + +thresholding +contour-filtering +3D +``` diff --git a/docs/source/docs/reflectiveAndShape/index.rst b/docs/source/docs/reflectiveAndShape/index.rst deleted file mode 100644 index fab306351b..0000000000 --- a/docs/source/docs/reflectiveAndShape/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Colored Shape & Reflective -========================== - -.. toctree:: - :maxdepth: 0 - :titlesonly: - - thresholding - contour-filtering - 3D diff --git a/docs/source/docs/reflectiveAndShape/thresholding.rst b/docs/source/docs/reflectiveAndShape/thresholding.md similarity index 65% rename from docs/source/docs/reflectiveAndShape/thresholding.rst rename to docs/source/docs/reflectiveAndShape/thresholding.md index df6e886090..93debb8b68 100644 --- a/docs/source/docs/reflectiveAndShape/thresholding.rst +++ b/docs/source/docs/reflectiveAndShape/thresholding.md @@ -1,37 +1,36 @@ -Thresholding -============ +# Thresholding -For colored shape detection, we want to tune our HSV thresholds such that only the goal color remains after the thresholding. The `HSV color representation `__ is similar to RGB in that it represents colors. However, HSV represents colors with hue, saturation and value components. 
Hue refers to the color, while saturation and value describe its richness and brightness. +For colored shape detection, we want to tune our HSV thresholds such that only the goal color remains after the thresholding. The [HSV color representation](https://en.wikipedia.org/wiki/HSL_and_HSV) is similar to RGB in that it represents colors. However, HSV represents colors with hue, saturation and value components. Hue refers to the color, while saturation and value describe its richness and brightness. In PhotonVision, HSV thresholds is available in the "Threshold" tab. -.. raw:: html +```{raw} html + +``` - - -Color Picker ------------- +## Color Picker The color picker can be used to quickly adjust HSV values. "Set to average" will set the HSV range to the color of the pixel selected, while "shrink range" and "expand range" will change the HSV threshold to include or exclude the selected pixel, respectively. -.. raw:: html +```{raw} html + +``` - +## Tuning Steps -Tuning Steps ------------- The following steps were derived from FRC 254's 2016 Championship presentation on computer vision and allows you to accurately tune PhotonVision to track your target. In order to properly capture the colors that you want, first turn your exposure low until you have a mostly dark image with the target still showing. A darker image ensures that you don't see things that aren't your target (ex. overhead lights). Be careful not to overexpose your image (you will be able to tell this if a target looks more cyan/white or equivalent instead of green when looking at it through the video feed) since that can give you poor results. For HSV tuning, start with Hue, as it is the most important/differentiating factor when it comes to detecting color. You want to make the range for Hue as small as possible in order to get accurate tracking. Feel free to reference the chart below to help. After you have properly tuned Hue, tune for high saturation/color intensity (S), and then brightness (V). Using this method will decrease the likelihood that you need to calibrate on the field. Saturation and Value's upper bounds will often end up needing to be the maximum (255). -.. image:: images/hsl_top.png - :width: 600 - :alt: HSV chart +```{image} images/hsl_top.png +:alt: HSV chart +:width: 600 +``` diff --git a/docs/source/docs/settings.rst b/docs/source/docs/settings.md similarity index 71% rename from docs/source/docs/settings.rst rename to docs/source/docs/settings.md index 6bbccb6ce9..d35657c326 100644 --- a/docs/source/docs/settings.rst +++ b/docs/source/docs/settings.md @@ -1,21 +1,23 @@ -Settings -======== +# Settings -.. image:: assets/settings.png +```{image} assets/settings.png +``` + +## General -General -^^^^^^^ Here, you can view general data on your system, including version, hardware, your platform, and performance statistics. You can also export/import the settings in a .zip file or restart PhotonVision/your coprocessor. -Networking -^^^^^^^^^^ -Here, you can set your team number, switch your IP between DHCP and static, and specify your host name. For more information about on-robot networking, click `here. `_ +## Networking + +Here, you can set your team number, switch your IP between DHCP and static, and specify your host name. For more information about on-robot networking, click [here.](https://docs.wpilib.org/en/latest/docs/networking/networking-introduction/networking-basics.html) The "team number" field will accept (in addition to a team number) an IP address or hostname. 
This is useful for testing PhotonVision on the same computer as a simulated robot program; you can set the team number to "localhost", and PhotonVision will send data to the network tables in the simulated robot. -.. note:: Something must be entered into the team number field if using PhotonVision on a robot. Using a team number is recommended (as opposed to an IP address or hostname). +:::{note} +Something must be entered into the team number field if using PhotonVision on a robot. Using a team number is recommended (as opposed to an IP address or hostname). +::: + +## LEDs -LEDs -^^^^ If your coprocessor electronics support hardware-controlled LED's and has the proper hardware configuration set up, here you can adjust the brightness of your LEDs. diff --git a/docs/source/docs/simulation/hardware-in-the-loop-sim.rst b/docs/source/docs/simulation/hardware-in-the-loop-sim.md similarity index 57% rename from docs/source/docs/simulation/hardware-in-the-loop-sim.rst rename to docs/source/docs/simulation/hardware-in-the-loop-sim.md index 1594060c36..5897d0b910 100644 --- a/docs/source/docs/simulation/hardware-in-the-loop-sim.rst +++ b/docs/source/docs/simulation/hardware-in-the-loop-sim.md @@ -1,9 +1,8 @@ -Hardware In The Loop Simulation -=============================== +# Hardware In The Loop Simulation Hardware in the loop simulation is using a physical device, such as a supported co-processor running PhotonVision, to enhance simulation capabilities. This is useful for developing and validating code before the camera is attached to a robot, as well as reducing the work required to use WPILib simulation with PhotonVision. -Before continuing, ensure PhotonVision is installed on your target device. Instructions can be found :ref:`here ` for all devices. +Before continuing, ensure PhotonVision is installed on your device. Instructions can be found {ref}`here ` for all devices. Your coprocessor and computer running simulation will have to be connected to the same network, like a home router. Connecting the coprocessor directly to the computer will not work. @@ -11,28 +10,30 @@ To simulate with hardware in the loop, a one-line change is required. From the P During normal robot operation, a team's number would be entered into this field so that the PhotonVision coprocessor connects to the roboRIO as a NT client. Instead, enter the IP address of your computer running the simulation here. -.. note:: +:::{note} +To find the IP address of your Windows computer, open command prompt and run `ipconfig`. - To find the IP address of your Windows computer, open command prompt and run ``ipconfig``. +```console +C:/Users/you>ipconfig - .. code-block:: console +Windows IP Configuration - C:/Users/you>ipconfig +Ethernet adapter Ethernet: - Windows IP Configuration + Connection-specific DNS Suffix . : home + Link-local IPv6 Address . . . . . : fe80::b41d:e861:ef01:9dba%10 + IPv4 Address. . . . . . . . . . . : 192.168.254.13 + Subnet Mask . . . . . . . . . . . : 255.255.255.0 + Default Gateway . . . . . . . . . : 192.168.254.254 +``` +::: - Ethernet adapter Ethernet: - - Connection-specific DNS Suffix . : home - Link-local IPv6 Address . . . . . : fe80::b41d:e861:ef01:9dba%10 - IPv4 Address. . . . . . . . . . . : 192.168.254.13 - Subnet Mask . . . . . . . . . . . : 255.255.255.0 - Default Gateway . . . . . . . . . : 192.168.254.254 - -.. 
image:: images/coproc-client-to-desktop-sim.png +```{image} images/coproc-client-to-desktop-sim.png +``` No code changes are required, PhotonLib should function similarly to normal operation. Now launch simulation, and you should be able to see the PhotonVision table on your simulation's NetworkTables dashboard. -.. image:: images/hardware-in-the-loop-sim.png +```{image} images/hardware-in-the-loop-sim.png +``` diff --git a/docs/source/docs/simulation/index.md b/docs/source/docs/simulation/index.md new file mode 100644 index 0000000000..a4f007cfee --- /dev/null +++ b/docs/source/docs/simulation/index.md @@ -0,0 +1,11 @@ +# Simulation + +```{toctree} +:maxdepth: 0 +:titlesonly: true + +simulation-java +simulation-cpp +simulation-python +hardware-in-the-loop-sim +``` diff --git a/docs/source/docs/simulation/index.rst b/docs/source/docs/simulation/index.rst deleted file mode 100644 index 1ec3a38d0c..0000000000 --- a/docs/source/docs/simulation/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Simulation -========== - -.. toctree:: - :maxdepth: 0 - :titlesonly: - - simulation - simulation-deprecated - hardware-in-the-loop-sim diff --git a/docs/source/docs/simulation/simulation-cpp.md b/docs/source/docs/simulation/simulation-cpp.md new file mode 100644 index 0000000000..ccb28de0be --- /dev/null +++ b/docs/source/docs/simulation/simulation-cpp.md @@ -0,0 +1,5 @@ +# Simulation Support in PhotonLib in C++ + +## What Is Supported? + +Nothing yet. diff --git a/docs/source/docs/simulation/simulation-deprecated.rst b/docs/source/docs/simulation/simulation-deprecated.rst deleted file mode 100644 index d47d625d78..0000000000 --- a/docs/source/docs/simulation/simulation-deprecated.rst +++ /dev/null @@ -1,94 +0,0 @@ -Simulation Support in PhotonLib (Deprecated) -============================================ - -.. attention:: This page details the pre-2024 simulation support. For current Java simulation support, see :doc:`/docs/simulation/simulation`. - -What Is Supported? ------------------- - -PhotonLib supports simulation of a camera and coprocessor running PhotonVision moving about a field on a robot. - -You can use this to help validate your robot code's behavior in simulation without needing a physical robot. - -Simulation Vision World Model ------------------------------ - -Sim-specific classes are provided to model sending one frame of a camera image through PhotonVision. Based on what targets are visible, results are published to NetworkTables. - -While processing, the given robot ``Pose2d`` is used to analyze which targets should be in view, and determine where they would have shown up in the camera image. - -Targets are considered in view if: - -1) Their centroid is within the field of view of the camera. -2) The camera is not in driver mode. -3) The target's in-image pixel size is greater than ``minTargetArea`` -4) The distance from the camera to the target is less than ``maxLEDRange`` - -.. warning:: Not all network tables objects are updated in simulation. The interaction through PhotonLib remains the same. Actual camera images are also not simulated. - -Latency of processing is not yet modeled. - -.. image:: diagrams/SimArchitecture-deprecated.drawio.svg - :alt: A diagram comparing the architecture of a real PhotonVision process to a simulated one. - -Simulated Vision System ------------------------ - -A ``SimVisionSystem`` represents the camera and coprocessor running PhotonVision moving around on the field. 
- -It requires a number of pieces of configuration to accurately simulate your physical setup. Match them to your configuration in PhotonVision, and to your robot's physical dimensions. - -.. tab-set-code:: - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java - :language: java - :lines: 73-93 - -After declaring the system, you should create and add one ``SimVisionTarget`` per target you are attempting to detect. - -.. tab-set-code:: - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java - :language: java - :lines: 95-111 - -Finally, while running the simulation, process simulated camera frames by providing the robot's pose to the system. - -.. tab-set-code:: - - .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java - :language: java - :lines: 138-139 - -This will cause most NetworkTables fields to update properly, representing any targets that are in view of the robot. - -Robot software which uses PhotonLib to interact with a camera running PhotonVision should work the same as though a real camera was hooked up and active. - -Raw-Data Approach ------------------ - -Users may wish to directly provide target information based on an existing detailed simulation. - -A ``SimPhotonCamera`` can be created for this purpose. It provides an interface where the user can supply target data via a list of ``PhotonTrackedTarget`` objects. - -.. tab-set-code:: - - .. code-block:: java - - @Override - public void simulationInit() { - // ... - cam = new SimPhotonCamera("MyCamera"); - // ... - } - - @Override - public void simulationPeriodic() { - // ... - ArrayList visibleTgtList = new ArrayList(); - visibleTgtList.add(new PhotonTrackedTarget(yawDegrees, pitchDegrees, area, skew, camToTargetTrans)); // Repeat for each target that you see - cam.submitProcessedFrame(0.0, visibleTgtList); - // ... - } - -Note that while there is less code and configuration required to get basic data into the simulation, this approach will cause the user to need to implement much more code on their end to calculate the relative positions of the robot and target. If you already have this, the raw interface may be helpful. However, if you don't, you'll likely want to be looking at the Simulated Vision System first. diff --git a/docs/source/docs/simulation/simulation-java.md b/docs/source/docs/simulation/simulation-java.md new file mode 100644 index 0000000000..7b4f99da97 --- /dev/null +++ b/docs/source/docs/simulation/simulation-java.md @@ -0,0 +1,252 @@ +# Simulation Support in PhotonLib in Java + + +## What Is Simulated? + +Simulation is a powerful tool for validating robot code without access to a physical robot. Read more about [simulation in WPILib](https://docs.wpilib.org/en/stable/docs/software/wpilib-tools/robot-simulation/introduction.html). + +In Java, PhotonLib can simulate cameras on the field and generate target data approximating what would be seen in reality. 
This simulation attempts to include the following: + +- Camera Properties + - Field of Vision + - Lens distortion + - Image noise + - Framerate + - Latency +- Target Data + - Detected / minimum-area-rectangle corners + - Center yaw/pitch + - Contour image area percentage + - Fiducial ID + - Fiducial ambiguity + - Fiducial solvePNP transform estimation +- Camera Raw/Processed Streams (grayscale) + +:::{note} +Simulation does NOT include the following: + +- Full physical camera/world simulation (targets are automatically thresholded) +- Image Thresholding Process (camera gain, brightness, etc) +- Pipeline switching +- Snapshots +::: + +This scope was chosen to balance fidelity of the simulation with the ease of setup, in a way that would best benefit most teams. + +```{image} diagrams/SimArchitecture.drawio.svg +:alt: A diagram comparing the architecture of a real PhotonVision process to a simulated +: one. +``` + +## Drivetrain Simulation Prerequisite + +A prerequisite for simulating vision frames is knowing where the camera is on the field-- to utilize PhotonVision simulation, you'll need to supply the simulated robot pose periodically. This requires drivetrain simulation for your robot project if you want to generate camera frames as your robot moves around the field. + +References for using PhotonVision simulation with drivetrain simulation can be found in the [PhotonLib Java Examples](https://github.com/PhotonVision/photonvision/blob/2a6fa1b6ac81f239c59d724da5339f608897c510/photonlib-java-examples/README.md) for both a differential drivetrain and a swerve drive. + +:::{important} +The simulated drivetrain pose must be separate from the drivetrain estimated pose if a pose estimator is utilized. +::: + +## Vision System Simulation + +A `VisionSystemSim` represents the simulated world for one or more cameras, and contains the vision targets they can see. It is constructed with a unique label: + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // A vision system sim labelled as "main" in NetworkTables + VisionSystemSim visionSim = new VisionSystemSim("main"); +``` + +PhotonLib will use this label to put a `Field2d` widget on NetworkTables at `/VisionSystemSim-[label]/Sim Field`. This label does not need to match any camera name or pipeline name in PhotonVision. + +Vision targets require a `TargetModel`, which describes the shape of the target. For AprilTags, PhotonLib provides `TargetModel.kAprilTag16h5` for the tags used in 2023, and `TargetModel.kAprilTag36h11` for the tags used starting in 2024. For other target shapes, convenience constructors exist for spheres, cuboids, and planar rectangles. For example, a planar rectangle can be created with: + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // A 0.5 x 0.25 meter rectangular target + TargetModel targetModel = new TargetModel(0.5, 0.25); +``` + +These `TargetModel` are paired with a target pose to create a `VisionTargetSim`. A `VisionTargetSim` is added to the `VisionSystemSim` to become visible to all of its cameras. + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // The pose of where the target is on the field. + // Its rotation determines where "forward" or the target x-axis points. + // Let's say this target is flat against the far wall center, facing the blue driver stations. 
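+      // (The numbers below are in meters and radians; a yaw of pi turns the target's
+      // +X axis to face back toward the blue alliance wall.)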
+ Pose3d targetPose = new Pose3d(16, 4, 2, new Rotation3d(0, 0, Math.PI)); + // The given target model at the given pose + VisionTargetSim visionTarget = new VisionTargetSim(targetPose, targetModel); + + // Add this vision target to the vision system simulation to make it visible + visionSim.addVisionTargets(visionTarget); +``` + +:::{note} +The pose of a `VisionTargetSim` object can be updated to simulate moving targets. Note, however, that this will break latency simulation for that target. +::: + +For convenience, an `AprilTagFieldLayout` can also be added to automatically create a target for each of its AprilTags. + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // The layout of AprilTags which we want to add to the vision system + AprilTagFieldLayout tagLayout = AprilTagFieldLayout.loadFromResource(AprilTagFields.k2024Crescendo.m_resourceFile); + + visionSim.addAprilTags(tagLayout); +``` + +:::{note} +The poses of the AprilTags from this layout depend on its current alliance origin (e.g. blue or red). If this origin is changed later, the targets will have to be cleared from the `VisionSystemSim` and re-added. +::: + +## Camera Simulation + +Now that we have a simulation world with vision targets, we can add simulated cameras to view it. + +Before adding a simulated camera, we need to define its properties. This is done with the `SimCameraProperties` class: + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // The simulated camera properties + SimCameraProperties cameraProp = new SimCameraProperties(); +``` + +By default, this will create a 960 x 720 resolution camera with a 90 degree diagonal FOV(field-of-view) and no noise, distortion, or latency. If we want to change these properties, we can do so: + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // A 640 x 480 camera with a 100 degree diagonal FOV. + cameraProp.setCalibration(640, 480, Rotation2d.fromDegrees(100)); + // Approximate detection noise with average and standard deviation error in pixels. + cameraProp.setCalibError(0.25, 0.08); + // Set the camera image capture framerate (Note: this is limited by robot loop rate). + cameraProp.setFPS(20); + // The average and standard deviation in milliseconds of image data latency. + cameraProp.setAvgLatencyMs(35); + cameraProp.setLatencyStdDevMs(5); +``` + +These properties are used in a `PhotonCameraSim`, which handles generating captured frames of the field from the simulated camera's perspective, and calculating the target data which is sent to the `PhotonCamera` being simulated. + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // The PhotonCamera used in the real robot code. + PhotonCamera camera = new PhotonCamera("cameraName"); + + // The simulation of this camera. Its values used in real robot code will be updated. + PhotonCameraSim cameraSim = new PhotonCameraSim(camera, cameraProp); +``` + +The `PhotonCameraSim` can now be added to the `VisionSystemSim`. We have to define a robot-to-camera transform, which describes where the camera is relative to the robot pose (this can be measured in CAD or by hand). + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // Our camera is mounted 0.1 meters forward and 0.5 meters up from the robot pose, + // (Robot pose is considered the center of rotation at the floor level, or Z = 0) + Translation3d robotToCameraTrl = new Translation3d(0.1, 0, 0.5); + // and pitched 15 degrees up. 
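+      // (In WPILib's coordinate convention a positive pitch tilts the camera downward
+      // about the +Y axis, so a camera pitched up 15 degrees is written as -15 degrees.)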
+ Rotation3d robotToCameraRot = new Rotation3d(0, Math.toRadians(-15), 0); + Transform3d robotToCamera = new Transform3d(robotToCameraTrl, robotToCameraRot); + + // Add this camera to the vision system simulation with the given robot-to-camera transform. + visionSim.addCamera(cameraSim, robotToCamera); +``` + +:::{important} +You may add multiple cameras to one `VisionSystemSim`, but not one camera to multiple `VisionSystemSim`. All targets in the `VisionSystemSim` will be visible to all its cameras. +::: + +If the camera is mounted on a mobile mechanism (like a turret) this transform can be updated in a periodic loop. + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // The turret the camera is mounted on is rotated 5 degrees + Rotation3d turretRotation = new Rotation3d(0, 0, Math.toRadians(5)); + robotToCamera = new Transform3d( + robotToCameraTrl.rotateBy(turretRotation), + robotToCameraRot.rotateBy(turretRotation)); + visionSim.adjustCamera(cameraSim, robotToCamera); +``` + +## Updating The Simulation World + +To update the `VisionSystemSim`, we simply have to pass in the simulated robot pose periodically (in `simulationPeriodic()`). + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // Update with the simulated drivetrain pose. This should be called every loop in simulation. + visionSim.update(robotPoseMeters); +``` + +Targets and cameras can be added and removed, and camera properties can be changed at any time. + +## Visualizing Results + +Each `VisionSystemSim` has its own built-in `Field2d` for displaying object poses in the simulation world such as the robot, simulated cameras, and actual/measured target poses. + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // Get the built-in Field2d used by this VisionSystemSim + visionSim.getDebugField(); +``` + +:::{figure} images/SimExampleField.png +*A* `VisionSystemSim`*'s internal* `Field2d` *customized with target images and colors* +::: + +A `PhotonCameraSim` can also draw and publish generated camera frames to a MJPEG stream similar to an actual PhotonVision process. + +```{eval-rst} +.. tab-set-code:: + + .. code-block:: Java + + // Enable the raw and processed streams. These are enabled by default. + cameraSim.enableRawStream(true); + cameraSim.enableProcessedStream(true); + + // Enable drawing a wireframe visualization of the field to the camera streams. + // This is extremely resource-intensive and is disabled by default. + cameraSim.enableDrawWireframe(true); +``` + +These streams follow the port order mentioned in {ref}`docs/installation/networking:Camera Stream Ports`. For example, a single simulated camera will have its raw stream at `localhost:1181` and processed stream at `localhost:1182`, which can also be found in the CameraServer tab of Shuffleboard like a normal camera stream. + +:::{figure} images/SimExampleFrame.png +*A frame from the processed stream of a simulated camera viewing some 2023 AprilTags with the field wireframe enabled* +::: diff --git a/docs/source/docs/simulation/simulation-python.md b/docs/source/docs/simulation/simulation-python.md new file mode 100644 index 0000000000..eb871c6bbe --- /dev/null +++ b/docs/source/docs/simulation/simulation-python.md @@ -0,0 +1,5 @@ +# Simulation Support in PhotonLib in Python + +## What Is Supported? 
+ +Nothing Yet diff --git a/docs/source/docs/simulation/simulation.rst b/docs/source/docs/simulation/simulation.rst deleted file mode 100644 index 8b5780850a..0000000000 --- a/docs/source/docs/simulation/simulation.rst +++ /dev/null @@ -1,226 +0,0 @@ -Simulation Support in PhotonLib -=============================== - -.. attention:: This page details the current simulation support for Java. For other languages, see :doc:`/docs/simulation/simulation-deprecated`. - -What Is Simulated? ------------------- - -Simulation is a powerful tool for validating robot code without access to a physical robot. Read more about `simulation in WPILib `_. - -PhotonLib can simulate cameras on the field and generate target data approximating what would be seen in reality. This simulation attempts to include the following: - -- Camera Properties - - Field of Vision - - Lens distortion - - Image noise - - Framerate - - Latency -- Target Data - - Detected / minimum-area-rectangle corners - - Center yaw/pitch - - Contour image area percentage - - Fiducial ID - - Fiducial ambiguity - - Fiducial solvePNP transform estimation -- Camera Raw/Processed Streams (grayscale) - -.. note:: - - Simulation does NOT include the following: - - - Full physical camera/world simulation (targets are automatically thresholded) - - Image Thresholding Process (camera gain, brightness, etc) - - Pipeline switching - - Snapshots - -This scope was chosen to balance fidelity of the simulation with the ease of setup, in a way that would best benefit most teams. - -.. image:: diagrams/SimArchitecture.drawio.svg - :alt: A diagram comparing the architecture of a real PhotonVision process to a simulated one. - -Drivetrain Simulation Prerequisite ----------------------------------- - -A prerequisite for simulating vision frames is knowing where the camera is on the field-- to utilize PhotonVision simulation, you'll need to supply the simulated robot pose periodically. This requires drivetrain simulation for your robot project if you want to generate camera frames as your robot moves around the field. - -References for using PhotonVision simulation with drivetrain simulation can be found in the `PhotonLib Java Examples `_ for both a differential drivetrain and a swerve drive. - -.. important:: The simulated drivetrain pose must be separate from the drivetrain estimated pose if a pose estimator is utilized. - -Vision System Simulation ------------------------- - -A ``VisionSystemSim`` represents the simulated world for one or more cameras, and contains the vision targets they can see. It is constructed with a unique label: - -.. tab-set-code:: - - .. code-block:: java - - // A vision system sim labelled as "main" in NetworkTables - VisionSystemSim visionSim = new VisionSystemSim("main"); - -PhotonLib will use this label to put a ``Field2d`` widget on NetworkTables at `/VisionSystemSim-[label]/Sim Field`. This label does not need to match any camera name or pipeline name in PhotonVision. - -Vision targets require a ``TargetModel``, which describes the shape of the target. For AprilTags, PhotonLib provides ``TargetModel.kAprilTag16h5`` for the tags used in 2023, and ``TargetModel.kAprilTag36h11`` for the tags used starting in 2024. For other target shapes, convenience constructors exist for spheres, cuboids, and planar rectangles. For example, a planar rectangle can be created with: - -.. tab-set-code:: - - .. 
code-block:: java - - // A 0.5 x 0.25 meter rectangular target - TargetModel targetModel = new TargetModel(0.5, 0.25); - -These ``TargetModel`` are paired with a target pose to create a ``VisionTargetSim``. A ``VisionTargetSim`` is added to the ``VisionSystemSim`` to become visible to all of its cameras. - -.. tab-set-code:: - - .. code-block:: java - - // The pose of where the target is on the field. - // Its rotation determines where "forward" or the target x-axis points. - // Let's say this target is flat against the far wall center, facing the blue driver stations. - Pose3d targetPose = new Pose3d(16, 4, 2, new Rotation3d(0, 0, Math.PI)); - // The given target model at the given pose - VisionTargetSim visionTarget = new VisionTargetSim(targetPose, targetModel); - - // Add this vision target to the vision system simulation to make it visible - visionSim.addVisionTargets(visionTarget); - -.. note:: The pose of a ``VisionTargetSim`` object can be updated to simulate moving targets. Note, however, that this will break latency simulation for that target. - -For convenience, an ``AprilTagFieldLayout`` can also be added to automatically create a target for each of its AprilTags. - -.. tab-set-code:: - - .. code-block:: java - - // The layout of AprilTags which we want to add to the vision system - AprilTagFieldLayout tagLayout = AprilTagFieldLayout.loadFromResource(AprilTagFields.k2024Crescendo.m_resourceFile); - - visionSim.addAprilTags(tagLayout); - -.. note:: The poses of the AprilTags from this layout depend on its current alliance origin (e.g. blue or red). If this origin is changed later, the targets will have to be cleared from the ``VisionSystemSim`` and re-added. - -Camera Simulation ------------------ - -Now that we have a simulation world with vision targets, we can add simulated cameras to view it. - -Before adding a simulated camera, we need to define its properties. This is done with the ``SimCameraProperties`` class: - -.. tab-set-code:: - - .. code-block:: java - - // The simulated camera properties - SimCameraProperties cameraProp = new SimCameraProperties(); - -By default, this will create a 960 x 720 resolution camera with a 90 degree diagonal FOV(field-of-view) and no noise, distortion, or latency. If we want to change these properties, we can do so: - -.. tab-set-code:: - - .. code-block:: java - - // A 640 x 480 camera with a 100 degree diagonal FOV. - cameraProp.setCalibration(640, 480, Rotation2d.fromDegrees(100)); - // Approximate detection noise with average and standard deviation error in pixels. - cameraProp.setCalibError(0.25, 0.08); - // Set the camera image capture framerate (Note: this is limited by robot loop rate). - cameraProp.setFPS(20); - // The average and standard deviation in milliseconds of image data latency. - cameraProp.setAvgLatencyMs(35); - cameraProp.setLatencyStdDevMs(5); - -These properties are used in a ``PhotonCameraSim``, which handles generating captured frames of the field from the simulated camera's perspective, and calculating the target data which is sent to the ``PhotonCamera`` being simulated. - -.. tab-set-code:: - - .. code-block:: java - - // The PhotonCamera used in the real robot code. - PhotonCamera camera = new PhotonCamera("cameraName"); - - // The simulation of this camera. Its values used in real robot code will be updated. - PhotonCameraSim cameraSim = new PhotonCameraSim(camera, cameraProp); - -The ``PhotonCameraSim`` can now be added to the ``VisionSystemSim``. 
We have to define a robot-to-camera transform, which describes where the camera is relative to the robot pose (this can be measured in CAD or by hand). - -.. tab-set-code:: - - .. code-block:: java - - // Our camera is mounted 0.1 meters forward and 0.5 meters up from the robot pose, - // (Robot pose is considered the center of rotation at the floor level, or Z = 0) - Translation3d robotToCameraTrl = new Translation3d(0.1, 0, 0.5); - // and pitched 15 degrees up. - Rotation3d robotToCameraRot = new Rotation3d(0, Math.toRadians(-15), 0); - Transform3d robotToCamera = new Transform3d(robotToCameraTrl, robotToCameraRot); - - // Add this camera to the vision system simulation with the given robot-to-camera transform. - visionSim.addCamera(cameraSim, robotToCamera); - -.. important:: You may add multiple cameras to one ``VisionSystemSim``, but not one camera to multiple ``VisionSystemSim``. All targets in the ``VisionSystemSim`` will be visible to all its cameras. - -If the camera is mounted on a mobile mechanism (like a turret) this transform can be updated in a periodic loop. - -.. tab-set-code:: - - .. code-block:: java - - // The turret the camera is mounted on is rotated 5 degrees - Rotation3d turretRotation = new Rotation3d(0, 0, Math.toRadians(5)); - robotToCamera = new Transform3d( - robotToCameraTrl.rotateBy(turretRotation), - robotToCameraRot.rotateBy(turretRotation)); - visionSim.adjustCamera(cameraSim, robotToCamera); - -Updating The Simulation World ------------------------------ - -To update the ``VisionSystemSim``, we simply have to pass in the simulated robot pose periodically (in ``simulationPeriodic()``). - -.. tab-set-code:: - - .. code-block:: java - - // Update with the simulated drivetrain pose. This should be called every loop in simulation. - visionSim.update(robotPoseMeters); - -Targets and cameras can be added and removed, and camera properties can be changed at any time. - -Visualizing Results -------------------- - -Each ``VisionSystemSim`` has its own built-in ``Field2d`` for displaying object poses in the simulation world such as the robot, simulated cameras, and actual/measured target poses. - -.. tab-set-code:: - - .. code-block:: java - - // Get the built-in Field2d used by this VisionSystemSim - visionSim.getDebugField(); - -.. figure:: images/SimExampleField.png - - *A* ``VisionSystemSim``\ *'s internal* ``Field2d`` *customized with target images and colors, as seen in the* `swervedriveposeestsim `_ *example.* - -A ``PhotonCameraSim`` can also draw and publish generated camera frames to a MJPEG stream similar to an actual PhotonVision process. - -.. tab-set-code:: - - .. code-block:: java - - // Enable the raw and processed streams. These are enabled by default. - cameraSim.enableRawStream(true); - cameraSim.enableProcessedStream(true); - - // Enable drawing a wireframe visualization of the field to the camera streams. - // This is extremely resource-intensive and is disabled by default. - cameraSim.enableDrawWireframe(true); - -These streams follow the port order mentioned in :ref:`docs/installation/networking:Camera Stream Ports`. For example, a single simulated camera will have its raw stream at ``localhost:1181`` and processed stream at ``localhost:1182``, which can also be found in the CameraServer tab of Shuffleboard like a normal camera stream. - -.. 
figure:: images/SimExampleFrame.png - - *A frame from the processed stream of a simulated camera viewing some 2023 AprilTags with the field wireframe enabled, as seen in the* `swervedriveposeestsim example `_. diff --git a/docs/source/docs/troubleshooting/camera-troubleshooting.rst b/docs/source/docs/troubleshooting/camera-troubleshooting.md similarity index 57% rename from docs/source/docs/troubleshooting/camera-troubleshooting.rst rename to docs/source/docs/troubleshooting/camera-troubleshooting.md index 82c9249031..a55ff29716 100644 --- a/docs/source/docs/troubleshooting/camera-troubleshooting.rst +++ b/docs/source/docs/troubleshooting/camera-troubleshooting.md @@ -1,49 +1,39 @@ -Camera Troubleshooting -====================== +# Camera Troubleshooting -Pi Cameras ----------- +## Pi Cameras -If you haven't yet, please refer to :ref:`the Pi CSI Camera Configuration page ` for information on updating :code:`config.txt` for your use case. If you've tried that, and things still aren't working, restart PhotonVision using the restart button in the settings tab, and press tilde (\`) in the web UI once connection is restored. This should show the most recent boot log. +If you haven't yet, please refer to {ref}`the Pi CSI Camera Configuration page ` for information on updating {code}`config.txt` for your use case. If you've tried that, and things still aren't working, restart PhotonVision using the restart button in the settings tab, and press tilde (\`) in the web UI once connection is restored. This should show the most recent boot log. -+----------------------------------+--------------------------------------------------------+------------------------------------+ -| | Expected output | Bad | -+==================================+========================================================+====================================+ -| LibCamera driver initialization | Successfully loaded libpicam shared object | Failed to load native libraries! | -+----------------------------------+--------------------------------------------------------+------------------------------------+ -| Camera detected | Adding local video device - "unicam" at "/dev/video0" | No output from VisionSourceManager | -+----------------------------------+--------------------------------------------------------+------------------------------------+ -| VisionSource created | Adding 1 configs to VMM. | No output from VisionSourceManager | -+----------------------------------+--------------------------------------------------------+------------------------------------+ +| | Expected output | Bad | +| ------------------------------- | ----------------------------------------------------- | ---------------------------------- | +| LibCamera driver initialization | Successfully loaded libpicam shared object | Failed to load native libraries! | +| Camera detected | Adding local video device - "unicam" at "/dev/video0" | No output from VisionSourceManager | +| VisionSource created | Adding 1 configs to VMM. | No output from VisionSourceManager | -If the driver isn't loaded, you may be using a non-official Pi image, or an image not new enough. Try updating to the most recent image available (one released for 2023) -- if that doesn't resolve the problem, :ref:`contact us` with your settings ZIP file and Pi version/camera version/config.txt file used. +If the driver isn't loaded, you may be using a non-official Pi image, or an image not new enough. 
Try updating to the most recent image available (one released for 2023) -- if that doesn't resolve the problem, {ref}`contact us` with your settings ZIP file and Pi version/camera version/config.txt file used. -If the camera is not detected, the most likely cause is either a config.txt file incorrectly set-up, or a ribbon cable attached backwards. Review the :ref:`picam configuration page `, and verify the ribbon cable is properly oriented at both ends, and that it is _fully_ inserted into the FFC connector. Then, :ref:`contact us` with your settings ZIP file and Pi version/camera version/config.txt file used. +If the camera is not detected, the most likely cause is either a config.txt file incorrectly set-up, or a ribbon cable attached backwards. Review the {ref}`picam configuration page `, and verify the ribbon cable is properly oriented at both ends, and that it is \_fully\_ inserted into the FFC connector. Then, {ref}`contact us` with your settings ZIP file and Pi version/camera version/config.txt file used. -USB cameras ------------ +## USB cameras USB cameras supported by CSCore require no libcamera driver initialization to work -- however, similar troubleshooting steps apply. Restart PhotonVision using the restart button in the settings tab, and press tilde on your keyboard (\`) when you're in the web UI once connection is restored. We expect to see the following output: -+----------------------------------+--------------------------------------------------------+------------------------------------+ -| | Expected output | Bad | -+==================================+========================================================+====================================+ -| Camera detected | Adding local video device - "foobar" at "/dev/foobar" | No output from VisionSourceManager | -+----------------------------------+--------------------------------------------------------+------------------------------------+ -| VisionSource created | Adding 1 configs to VMM. | No output from VisionSourceManager | -+----------------------------------+--------------------------------------------------------+------------------------------------+ +| | Expected output | Bad | +| -------------------- | ----------------------------------------------------- | ---------------------------------- | +| Camera detected | Adding local video device - "foobar" at "/dev/foobar" | No output from VisionSourceManager | +| VisionSource created | Adding 1 configs to VMM. | No output from VisionSourceManager | -Determining detected cameras in Video4Linux (v4l2) --------------------------------------------------- +## Determining detected cameras in Video4Linux (v4l2) On Linux devices (including Raspberry Pi), PhotonVision uses WPILib's CSCore to interact with video devices, which internally uses Video4Linux (v4l2). CSCore, and therefore Photon, requires that cameras attached have good v4l drivers for proper functionality. These should be built into the Linux kernel, and do not need to be installed manually. Valid picamera setup (from /boot/config.txt) can also be determined using these steps. The list-devices command will show all valid video devices detected, and list-formats the list of "video modes" each camera can be in. 
-- For picams: edit the config.txt file as described in the :ref:`picam configuration page ` -- SSH into your Pi: :code:`ssh pi@photonvision.local` and enter the username "pi" & password "raspberry" -- run :code:`v4l2-ctl --list-devices` and :code:`v4l2-ctl --list-formats` +- For picams: edit the config.txt file as described in the {ref}`picam configuration page ` +- SSH into your Pi: {code}`ssh pi@photonvision.local` and enter the username "pi" & password "raspberry" +- run {code}`v4l2-ctl --list-devices` and {code}`v4l2-ctl --list-formats` -We expect an output similar to the following. For picameras, note the "unicam" entry with path :code:`platform:3f801000.csi` (if we don't see this, that's bad), and a huge list of valid video formats. USB cameras should show up similarly in the output of these commands. +We expect an output similar to the following. For picameras, note the "unicam" entry with path {code}`platform:3f801000.csi` (if we don't see this, that's bad), and a huge list of valid video formats. USB cameras should show up similarly in the output of these commands. +```{eval-rst} .. tab-set:: .. tab-item:: Working @@ -115,3 +105,4 @@ We expect an output similar to the following. For picameras, note the "unicam" e /dev/video19 /dev/media2 Cannot open device /dev/video0, exiting. +``` diff --git a/docs/source/docs/troubleshooting/common-errors.rst b/docs/source/docs/troubleshooting/common-errors.md similarity index 57% rename from docs/source/docs/troubleshooting/common-errors.rst rename to docs/source/docs/troubleshooting/common-errors.md index 7f391c86b4..5addeafbc2 100644 --- a/docs/source/docs/troubleshooting/common-errors.rst +++ b/docs/source/docs/troubleshooting/common-errors.md @@ -1,69 +1,63 @@ -Common Issues / Questions -========================= +# Common Issues / Questions -This page will grow as needed in order to cover commonly seen issues by teams. If this page doesn't help you and you need further assistance, feel free to :ref:`Contact Us`. +This page will grow as needed in order to cover commonly seen issues by teams. If this page doesn't help you and you need further assistance, feel free to {ref}`Contact Us`. -Known Issues ------------- -All known issues can be found on our `GitHub page `_. +## Known Issues -PS3Eye -^^^^^^ -Due to an issue with Linux kernels, the drivers for the PS3Eye are no longer supported. If you would still like to use the PS3Eye, you can downgrade your kernel with the following command: ``sudo CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt rpi-update 866751bfd023e72bd96a8225cf567e03c334ecc4``. Note: You must be connected to the internet to run the command. +All known issues can be found on our [GitHub page](https://github.com/PhotonVision/photonvision/issues). -LED Control -^^^^^^^^^^^ +### PS3Eye + +Due to an issue with Linux kernels, the drivers for the PS3Eye are no longer supported. If you would still like to use the PS3Eye, you can downgrade your kernel with the following command: `sudo CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt rpi-update 866751bfd023e72bd96a8225cf567e03c334ecc4`. Note: You must be connected to the internet to run the command. + +### LED Control The logic for controlling LED mode when `multiple cameras are connected` is not fully fleshed out. In its current state, LED control is only enabled when a Pi Camera Module is not in driver mode—meaning a USB camera on its own is unable to control the LEDs. 
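Where this section recommends setting the `photonvision/ledMode` NetworkTables entry from robot code, a minimal sketch might look like the following; the numeric values are an assumption based on PhotonLib's `VisionLEDMode` convention (-1 default, 0 off, 1 on, 2 blink):

```java
import edu.wpi.first.networktables.NetworkTableInstance;

// Force the PhotonVision LEDs on by writing the global ledMode entry.
NetworkTableInstance.getDefault()
    .getTable("photonvision")
    .getEntry("ledMode")
    .setNumber(1); // -1 = pipeline default, 0 = off, 1 = on, 2 = blink (assumed mapping)
```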
-For now, if you are using multiple cameras, it is recommended that teams set the value of the NetworkTables entry :code:`photonvision/ledMode` from the robot code to control LED state. +For now, if you are using multiple cameras, it is recommended that teams set the value of the NetworkTables entry {code}`photonvision/ledMode` from the robot code to control LED state. -Commonly Seen Issues --------------------- +## Commonly Seen Issues +### Networking Issues -Networking Issues -^^^^^^^^^^^^^^^^^ +Please refer to our comprehensive {ref}`networking troubleshooting tips ` for debugging suggestions and possible causes. -Please refer to our comprehensive :ref:`networking troubleshooting tips ` for debugging suggestions and possible causes. +### Camera won't show up -Camera won't show up -^^^^^^^^^^^^^^^^^^^^ -Try these steps to :ref:`troubleshoot your camera connection `. +Try these steps to {ref}`troubleshoot your camera connection `. -If you are using a USB camera, it is possible your USB Camera isn't supported by CSCore and therefore won't work with PhotonVision. See :ref:`supported hardware page for more information `, or the above Camera Troubleshooting page for more information on determining this locally. +If you are using a USB camera, it is possible your USB Camera isn't supported by CSCore and therefore won't work with PhotonVision. See {ref}`supported hardware page for more information `, or the above Camera Troubleshooting page for more information on determining this locally. -Camera is consistently returning incorrect values when in 3D mode -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Read the tips on the :ref:`camera calibration page`, follow the advice there, and redo the calibration. +### Camera is consistently returning incorrect values when in 3D mode -Not getting data from PhotonLib -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Read the tips on the {ref}`camera calibration page`, follow the advice there, and redo the calibration. -1. Ensure your coprocessor version and PhotonLib version match. This can be checked by the settings tab and examining the .json itself (respectively). +### Not getting data from PhotonLib +1. Ensure your coprocessor version and PhotonLib version match. This can be checked by the settings tab and examining the .json itself (respectively). 2. Ensure that you have your team number set properly. - 3. Use Glass to verify that PhotonVision has connected to the NetworkTables server served by your robot. With Glass connected in client mode to your RoboRIO, we expect to see "photonvision" listed under the Clients tab of the NetworkTables Info pane. -.. image:: images/glass-connections.png - :width: 600 - :alt: Using Glass to check NT connections +```{image} images/glass-connections.png +:alt: Using Glass to check NT connections +:width: 600 +``` 4. When creating a `PhotonCamera` in code, does the `cameraName` provided match the name in the upper-right card of the web interface? Glass can be used to verify the RoboRIO is receiving NetworkTables data by inspecting the `photonvision` subtable for your camera nickname. -.. image:: images/camera-subtable.png - :width: 600 - :alt: Using Glass to check camera publishing +```{image} images/camera-subtable.png +:alt: Using Glass to check camera publishing +:width: 600 +``` + +### Unable to download PhotonLib -Unable to download PhotonLib -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Ensure all of your network firewalls are disabled and you aren't on a school-network. 
-PhotonVision prompts for login on startup -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -This is normal. You don't need to connect a display to your Raspberry Pi to use PhotonVision, just navigate to the relevant webpage (ex. ``photonvision.local:5800``) in order to see the dashboard. +### PhotonVision prompts for login on startup + +This is normal. You don't need to connect a display to your Raspberry Pi to use PhotonVision, just navigate to the relevant webpage (ex. `photonvision.local:5800`) in order to see the dashboard. + +### Raspberry Pi enters into boot looping state when using PhotonVision -Raspberry Pi enters into boot looping state when using PhotonVision -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This is most commonly seen when your Pi doesn't have adequate power / is being undervolted. Ensure that your power supply is functioning properly. diff --git a/docs/source/docs/troubleshooting/index.md b/docs/source/docs/troubleshooting/index.md new file mode 100644 index 0000000000..0b8f8384c8 --- /dev/null +++ b/docs/source/docs/troubleshooting/index.md @@ -0,0 +1,10 @@ +# Troubleshooting + +```{toctree} +:maxdepth: 1 + +common-errors +logging +camera-troubleshooting +networking-troubleshooting +``` diff --git a/docs/source/docs/troubleshooting/index.rst b/docs/source/docs/troubleshooting/index.rst deleted file mode 100644 index 22d50f1457..0000000000 --- a/docs/source/docs/troubleshooting/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Troubleshooting -================ - -.. toctree:: - :maxdepth: 1 - - common-errors - logging - camera-troubleshooting - networking-troubleshooting diff --git a/docs/source/docs/troubleshooting/logging.md b/docs/source/docs/troubleshooting/logging.md new file mode 100644 index 0000000000..ddf275a606 --- /dev/null +++ b/docs/source/docs/troubleshooting/logging.md @@ -0,0 +1,22 @@ +# Logging + +:::{note} +Logging is very helpful when trying to debug issues within PhotonVision, as it allows us to see what is happening within the program after it is ran. Whenever reporting an issue to PhotonVision, we request that you include logs whenever possible. +::: + +In addition to storing logs in timestamped files in the config directory, PhotonVision streams logs to the web dashboard. These logs can be viewed later by pressing the \` key. In this view, logs can be filtered by level or downloaded. + +:::{note} +When the program first starts, it sends logs from startup to the client that first connects. This does not happen on subsequent connections. +::: + +:::{note} +Logs are stored inside the {code}`photonvision_config/logs` directory. Exporting the settings ZIP will also download all old logs for further review. +::: + +```{raw} html + +``` diff --git a/docs/source/docs/troubleshooting/logging.rst b/docs/source/docs/troubleshooting/logging.rst deleted file mode 100644 index 162e7d3fd6..0000000000 --- a/docs/source/docs/troubleshooting/logging.rst +++ /dev/null @@ -1,17 +0,0 @@ -Logging -======= - -.. note:: Logging is very helpful when trying to debug issues within PhotonVision, as it allows us to see what is happening within the program after it is ran. Whenever reporting an issue to PhotonVision, we request that you include logs whenever possible. - -In addition to storing logs in timestamped files in the config directory, PhotonVision streams logs to the web dashboard. These logs can be viewed later by pressing the \` key. In this view, logs can be filtered by level or downloaded. - -.. 
note:: When the program first starts, it sends logs from startup to the client that first connects. This does not happen on subsequent connections. - -.. note:: Logs are stored inside the :code:`photonvision_config/logs` directory. Exporting the settings ZIP will also download all old logs for further review. - -.. raw:: html - - diff --git a/docs/source/docs/troubleshooting/networking-troubleshooting.md b/docs/source/docs/troubleshooting/networking-troubleshooting.md new file mode 100644 index 0000000000..d02520aae4 --- /dev/null +++ b/docs/source/docs/troubleshooting/networking-troubleshooting.md @@ -0,0 +1,47 @@ +# Networking Troubleshooting + +Before reading further, ensure that you follow all the recommendations {ref}`in our networking section `. You should follow these guidelines in order for PhotonVision to work properly; other networking setups are not officially supported. + +## Checklist + +A few issues make up the majority of support requests. Run through this checklist quickly to catch some common mistakes. + +- Is your camera connected to the robot's radio through a {ref}`network switch `? + - Ethernet straight from a laptop to a coprocessor will not work (most likely), due to the unreliability of link-local connections. + - Even if there's a switch between your laptop and coprocessor, you'll still want a radio or router in the loop somehow. + - The FRC radio is the *only* router we will officially support due to the innumerable variations between routers. +- (Raspberry Pi, Orange Pi & Limelight only) have you flashed the correct image, and is it up to date? + - Limelights 2/2+ and Gloworms should be flashed using the Limelight 2 image (eg, `photonvision-v2024.2.8-linuxarm64_limelight2.img.xz`). + - Limelights 3 should be flashed using the Limelight 3 image (eg, `photonvision-v2024.2.8-linuxarm64_limelight3.img.xz`). + - Raspberry Pi devices (including Pi 3, Pi 4, CM3 and CM4) should be flashed using the Raspberry Pi image (eg, `photonvision-v2024.2.8-linuxarm64_RaspberryPi.img.xz`). + - Orange Pi 5 devices should be flashed using the Orange Pi 5 image (eg, `photonvision-v2024.2.8-linuxarm64_orangepi5.img.xz`). + - Orange Pi 5+ devices should be flashed using the Orange Pi 5+ image (eg, `photonvision-v2024.2.8-linuxarm64_orangepi5plus.img.xz`). +- Is your robot code using a **2024** version of WPILib, and is your coprocessor using the most up to date **2024** release? + - 2022, 2023 and 2024 versions of either cannot be mix-and-matched! + - Your PhotonVision version can be checked on the {ref}`settings tab`. +- Is your team number correctly set on the {ref}`settings tab`? + +### photonvision.local Not Found + +Use [Angry IP Scanner](https://angryip.org/) and look for an IP that has port 5800 open. Then go to your web browser and do \:5800. + +Alternatively, you can plug your coprocessor into a display, plug in a keyboard, and run `hostname -I` in the terminal. This should give you the IP Address of your coprocessor, then go to your web browser and do \:5800. + +If nothing shows up, ensure your coprocessor has power, and you are following all of our networking recommendations, feel free to {ref}`contact us ` and we will help you. + +### Can't Connect To Robot + +Please check that: +1\. You don't have the NetworkTables Server on (toggleable in the settings tab). Turn this off when doing work on a robot. +2\. You have your team number set properly in the settings tab. +3\. Your camera name in the `PhotonCamera` constructor matches the name in the UI. +4\. 
You are using the 2024 version of WPILib and RoboRIO image. +5\. Your robot is on. + +If all of the above are met and you still have issues, feel free to {ref}`contact us ` and provide the following information: + +- The WPILib version used by your robot code +- PhotonLib vendor dependency version +- PhotonVision version (from the UI) +- Your settings exported from your coprocessor (if you're able to access it) +- How your RoboRIO/coprocessor are networked together diff --git a/docs/source/docs/troubleshooting/networking-troubleshooting.rst b/docs/source/docs/troubleshooting/networking-troubleshooting.rst deleted file mode 100644 index 07e4586c00..0000000000 --- a/docs/source/docs/troubleshooting/networking-troubleshooting.rst +++ /dev/null @@ -1,53 +0,0 @@ -Networking Troubleshooting -========================== - -Before reading further, ensure that you follow all the recommendations :ref:`in our networking section `. You should follow these guidelines in order for PhotonVision to work properly; other networking setups are not officially supported. - - -Checklist -^^^^^^^^^ - -A few issues make up the majority of support requests. Run through this checklist quickly to catch some common mistakes. - -- Is your camera connected to the robot's radio through a :ref:`network switch `? - - Ethernet straight from a laptop to a coprocessor will not work (most likely), due to the unreliability of link-local connections. - - Even if there's a switch between your laptop and coprocessor, you'll still want a radio or router in the loop somehow. - - The FRC radio is the *only* router we will officially support due to the innumerable variations between routers. -- (Raspberry Pi, Orange Pi & Limelight only) have you flashed the correct image, and is it up to date? - - Limelights 2/2+ and Gloworms should be flashed using the Limelight 2 image (eg, `photonvision-v2024.2.8-linuxarm64_limelight2.img.xz`). - - Limelights 3 should be flashed using the Limelight 3 image (eg, `photonvision-v2024.2.8-linuxarm64_limelight3.img.xz`). - - Raspberry Pi devices (including Pi 3, Pi 4, CM3 and CM4) should be flashed using the Raspberry Pi image (eg, `photonvision-v2024.2.8-linuxarm64_RaspberryPi.img.xz`). - - Orange Pi 5 devices should be flashed using the Orange Pi 5 image (eg, `photonvision-v2024.2.8-linuxarm64_orangepi5.img.xz`). - - Orange Pi 5+ devices should be flashed using the Orange Pi 5+ image (eg, `photonvision-v2024.2.8-linuxarm64_orangepi5plus.img.xz`). -- Is your robot code using a **2024** version of WPILib, and is your coprocessor using the most up to date **2024** release? - - 2022, 2023 and 2024 versions of either cannot be mix-and-matched! - - Your PhotonVision version can be checked on the :ref:`settings tab`. -- Is your team number correctly set on the :ref:`settings tab`? - - -photonvision.local Not Found ----------------------------- - -Use `Angry IP Scanner `_ and look for an IP that has port 5800 open. Then go to your web browser and do :5800. - -Alternatively, you can plug your coprocessor into a display, plug in a keyboard, and run ``hostname -I`` in the terminal. This should give you the IP Address of your coprocessor, then go to your web browser and do :5800. - -If nothing shows up, ensure your coprocessor has power, and you are following all of our networking recommendations, feel free to :ref:`contact us ` and we will help you. - -Can't Connect To Robot ----------------------- - -Please check that: -1. You don't have the NetworkTables Server on (toggleable in the settings tab). 
Turn this off when doing work on a robot. -2. You have your team number set properly in the settings tab. -3. Your camera name in the ``PhotonCamera`` constructor matches the name in the UI. -4. You are using the 2024 version of WPILib and RoboRIO image. -5. Your robot is on. - -If all of the above are met and you still have issues, feel free to :ref:`contact us ` and provide the following information: - -- The WPILib version used by your robot code -- PhotonLib vendor dependency version -- PhotonVision version (from the UI) -- Your settings exported from your coprocessor (if you're able to access it) -- How your RoboRIO/coprocessor are networked together diff --git a/docs/source/index.md b/docs/source/index.md new file mode 100644 index 0000000000..f624ad155e --- /dev/null +++ b/docs/source/index.md @@ -0,0 +1,128 @@ +```{image} assets/PhotonVision-Header-onWhite.png +:alt: PhotonVision +``` + +Welcome to the official documentation of PhotonVision! PhotonVision is the free, fast, and easy-to-use vision processing solution for the *FIRST* Robotics Competition. PhotonVision is designed to get vision working on your robot *quickly*, without the significant cost of other similar solutions. PhotonVision supports a variety of COTS hardware, including the Raspberry Pi 3 and 4, the [Gloworm smart camera](https://photonvision.github.io/gloworm-docs/docs/quickstart/#finding-gloworm), the [SnakeEyes Pi hat](https://www.playingwithfusion.com/productview.php?pdid=133), and the Orange Pi 5. + +# Content + +```{eval-rst} +.. grid:: 2 + + .. grid-item-card:: Getting Started + :link: docs/installation/index + :link-type: doc + + Get started with installing PhotonVision, creating a pipeline, and tuning it for usage in competitions. + + .. grid-item-card:: Programming Reference and PhotonLib + :link: docs/programming/index + :link-type: doc + + Learn more about PhotonLib, our vendor dependency which makes it easier for teams to retrieve vision data, make various calculations, and more. +``` + +```{eval-rst} +.. grid:: 2 + + .. grid-item-card:: Integration + :link: docs/integration/index + :link-type: doc + + Pick how to use vision processing results to control a physical robot. + + .. grid-item-card:: Code Examples + :link: docs/examples/index + :link-type: doc + + View various step by step guides on how to use data from PhotonVision in your code, along with game-specific examples. +``` + +```{eval-rst} +.. grid:: 2 + + .. grid-item-card:: Hardware + :link: docs/hardware/index + :link-type: doc + + Select appropriate hardware for high-quality and easy vision target detection. + + .. grid-item-card:: Contributing + :link: docs/contributing/index + :link-type: doc + + Interested in helping with PhotonVision? Learn more about how to contribute to our main code base, documentation, and more. +``` + +# Source Code + +The source code for all PhotonVision projects is available through our [GitHub organization](https://github.com/PhotonVision). + +- [PhotonVision](https://github.com/PhotonVision/photonvision) +- [PhotonVision ReadTheDocs](https://github.com/PhotonVision/photonvision-docs/) + +# Contact Us + +To report a bug or submit a feature request in PhotonVision, please [submit an issue on the PhotonVision GitHub](https://github.com/PhotonVision/photonvision) or [contact the developers on Discord](https://discord.com/invite/KS76FrX). + +If you find a problem in this documentation, please submit an issue on the [PhotonVision Documentation GitHub](https://github.com/PhotonVision/photonvision-docs). 
+ +# License + +PhotonVision is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.en.html). + +```{toctree} +:caption: Getting Started +:hidden: true +:maxdepth: 0 + +docs/description +docs/hardware/index +docs/installation/index +docs/settings +``` + +```{toctree} +:caption: Pipeline Tuning and Calibration +:hidden: true +:maxdepth: 0 + +docs/pipelines/index +docs/apriltag-pipelines/index +docs/reflectiveAndShape/index +docs/objectDetection/index +docs/calibration/calibration +``` + +```{toctree} +:caption: Programming Reference +:hidden: true +:maxdepth: 1 + +docs/programming/photonlib/index +docs/simulation/index +docs/integration/index +docs/examples/index +``` + +```{toctree} +:caption: Additional Resources +:hidden: true +:maxdepth: 1 + +docs/troubleshooting/index +docs/additional-resources/best-practices +docs/additional-resources/config +docs/additional-resources/nt-api +docs/contributing/index +``` + +```{toctree} +:caption: API Documentation +:hidden: true +:maxdepth: 1 + + Java + + C++ +``` diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 62a87b4c05..0000000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,112 +0,0 @@ -.. image:: assets/PhotonVision-Header-onWhite.png - :alt: PhotonVision - -Welcome to the official documentation of PhotonVision! PhotonVision is the free, fast, and easy-to-use vision processing solution for the *FIRST*\ Robotics Competition. PhotonVision is designed to get vision working on your robot *quickly*, without the significant cost of other similar solutions. PhotonVision supports a variety of COTS hardware, including the Raspberry Pi 3 and 4, the `Gloworm smart camera `_, the `SnakeEyes Pi hat `_, and the Orange Pi 5. - -Content -------- - -.. grid:: 2 - - .. grid-item-card:: Getting Started - :link: docs/installation/index - :link-type: doc - - Get started with installing PhotonVision, creating a pipeline, and tuning it for usage in competitions. - - .. grid-item-card:: Programming Reference and PhotonLib - :link: docs/programming/index - :link-type: doc - - Learn more about PhotonLib, our vendor dependency which makes it easier for teams to retrieve vision data, make various calculations, and more. - -.. grid:: 2 - - .. grid-item-card:: Integration - :link: docs/integration/index - :link-type: doc - - Pick how to use vision processing results to control a physical robot. - - .. grid-item-card:: Code Examples - :link: docs/examples/index - :link-type: doc - - View various step by step guides on how to use data from PhotonVision in your code, along with game-specific examples. - -.. grid:: 2 - - .. grid-item-card:: Hardware - :link: docs/hardware/index - :link-type: doc - - Select appropriate hardware for high-quality and easy vision target detection. - - .. grid-item-card:: Contributing - :link: docs/contributing/index - :link-type: doc - - Interested in helping with PhotonVision? Learn more about how to contribute to our main code base, documentation, and more. - -Source Code ------------ - -The source code for all PhotonVision projects is available through our `GitHub organization `_. - -* `PhotonVision `_ -* `PhotonVision ReadTheDocs `_ - -Contact Us ----------- - -To report a bug or submit a feature request in PhotonVision, please `submit an issue on the PhotonVision GitHub `_ or `contact the developers on Discord `_. - -If you find a problem in this documentation, please submit an issue on the `PhotonVision Documentation GitHub `_. 
- -License -------- - -PhotonVision is licensed under the `GNU GPL v3 `_. - - -.. toctree:: - :maxdepth: 0 - :caption: Getting Started - :hidden: - - docs/description - docs/hardware/index - docs/installation/index - docs/settings - -.. toctree:: - :maxdepth: 0 - :caption: Pipeline Tuning and Calibration - :hidden: - - docs/pipelines/index - docs/apriltag-pipelines/index - docs/reflectiveAndShape/index - docs/objectDetection/index - docs/calibration/calibration - -.. toctree:: - :maxdepth: 1 - :caption: Programming Reference - :hidden: - - docs/programming/photonlib/index - docs/simulation/index - docs/integration/index - docs/examples/index - -.. toctree:: - :maxdepth: 1 - :caption: Additional Resources - :hidden: - - docs/troubleshooting/index - docs/additional-resources/best-practices - docs/additional-resources/config - docs/additional-resources/nt-api - docs/contributing/index diff --git a/photon-client/package-lock.json b/photon-client/package-lock.json index 5ce803504d..53c926e5d9 100644 --- a/photon-client/package-lock.json +++ b/photon-client/package-lock.json @@ -17,11 +17,12 @@ "three": "^0.160.0", "vue": "^2.7.14", "vue-router": "^3.6.5", + "vue-virtual-scroll-list": "^2.3.5", "vuetify": "^2.7.1" }, "devDependencies": { "@rushstack/eslint-patch": "^1.3.2", - "@types/node": "^16.11.45", + "@types/node": "^18.19.45", "@types/three": "^0.160.0", "@vitejs/plugin-vue2": "^2.3.1", "@vue/eslint-config-prettier": "^9.0.0", @@ -37,7 +38,7 @@ "terser": "^5.14.2", "typescript": "^5.3.3", "unplugin-vue-components": "^0.26.0", - "vite": "^4.5.1" + "vite": "^5.4.2" } }, "node_modules/@aashutoshrathi/word-wrap": { @@ -80,10 +81,26 @@ "node": ">=6.9.0" } }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, "node_modules/@esbuild/android-arm": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.18.20.tgz", - "integrity": "sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", "cpu": [ "arm" ], @@ -97,9 +114,9 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.18.20.tgz", - "integrity": "sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", "cpu": [ "arm64" ], @@ -113,9 +130,9 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.18.20.tgz", - "integrity": "sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": 
"sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", "cpu": [ "x64" ], @@ -129,9 +146,9 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz", - "integrity": "sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", "cpu": [ "arm64" ], @@ -145,9 +162,9 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.18.20.tgz", - "integrity": "sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", "cpu": [ "x64" ], @@ -161,9 +178,9 @@ } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.20.tgz", - "integrity": "sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", "cpu": [ "arm64" ], @@ -177,9 +194,9 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.18.20.tgz", - "integrity": "sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", "cpu": [ "x64" ], @@ -193,9 +210,9 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.18.20.tgz", - "integrity": "sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", "cpu": [ "arm" ], @@ -209,9 +226,9 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.18.20.tgz", - "integrity": "sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", "cpu": [ "arm64" ], @@ -225,9 +242,9 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.18.20.tgz", - "integrity": 
"sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", "cpu": [ "ia32" ], @@ -241,9 +258,9 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.18.20.tgz", - "integrity": "sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", "cpu": [ "loong64" ], @@ -257,9 +274,9 @@ } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.18.20.tgz", - "integrity": "sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", "cpu": [ "mips64el" ], @@ -273,9 +290,9 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.18.20.tgz", - "integrity": "sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", "cpu": [ "ppc64" ], @@ -289,9 +306,9 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.18.20.tgz", - "integrity": "sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", "cpu": [ "riscv64" ], @@ -305,9 +322,9 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.18.20.tgz", - "integrity": "sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", "cpu": [ "s390x" ], @@ -321,9 +338,9 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.18.20.tgz", - "integrity": "sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": 
"sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", "cpu": [ "x64" ], @@ -337,9 +354,9 @@ } }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.18.20.tgz", - "integrity": "sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", "cpu": [ "x64" ], @@ -353,9 +370,9 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.18.20.tgz", - "integrity": "sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", "cpu": [ "x64" ], @@ -369,9 +386,9 @@ } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.18.20.tgz", - "integrity": "sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", "cpu": [ "x64" ], @@ -385,9 +402,9 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.18.20.tgz", - "integrity": "sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", "cpu": [ "arm64" ], @@ -401,9 +418,9 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.18.20.tgz", - "integrity": "sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", "cpu": [ "ia32" ], @@ -417,9 +434,9 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.18.20.tgz", - "integrity": "sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", "cpu": [ "x64" ], @@ -666,6 +683,214 @@ } } }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.21.0.tgz", + "integrity": 
"sha512-WTWD8PfoSAJ+qL87lE7votj3syLavxunWhzCnx3XFxFiI/BA/r3X7MUM8dVrH8rb2r4AiO8jJsr3ZjdaftmnfA==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.21.0.tgz", + "integrity": "sha512-a1sR2zSK1B4eYkiZu17ZUZhmUQcKjk2/j9Me2IDjk1GHW7LB5Z35LEzj9iJch6gtUfsnvZs1ZNyDW2oZSThrkA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.21.0.tgz", + "integrity": "sha512-zOnKWLgDld/svhKO5PD9ozmL6roy5OQ5T4ThvdYZLpiOhEGY+dp2NwUmxK0Ld91LrbjrvtNAE0ERBwjqhZTRAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.21.0.tgz", + "integrity": "sha512-7doS8br0xAkg48SKE2QNtMSFPFUlRdw9+votl27MvT46vo44ATBmdZdGysOevNELmZlfd+NEa0UYOA8f01WSrg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.21.0.tgz", + "integrity": "sha512-pWJsfQjNWNGsoCq53KjMtwdJDmh/6NubwQcz52aEwLEuvx08bzcy6tOUuawAOncPnxz/3siRtd8hiQ32G1y8VA==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.21.0.tgz", + "integrity": "sha512-efRIANsz3UHZrnZXuEvxS9LoCOWMGD1rweciD6uJQIx2myN3a8Im1FafZBzh7zk1RJ6oKcR16dU3UPldaKd83w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.21.0.tgz", + "integrity": "sha512-ZrPhydkTVhyeGTW94WJ8pnl1uroqVHM3j3hjdquwAcWnmivjAwOYjTEAuEDeJvGX7xv3Z9GAvrBkEzCgHq9U1w==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.21.0.tgz", + "integrity": "sha512-cfaupqd+UEFeURmqNP2eEvXqgbSox/LHOyN9/d2pSdV8xTrjdg3NgOFJCtc1vQ/jEke1qD0IejbBfxleBPHnPw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.21.0.tgz", + "integrity": "sha512-ZKPan1/RvAhrUylwBXC9t7B2hXdpb/ufeu22pG2psV7RN8roOfGurEghw1ySmX/CmDDHNTDDjY3lo9hRlgtaHg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.21.0.tgz", + "integrity": 
"sha512-H1eRaCwd5E8eS8leiS+o/NqMdljkcb1d6r2h4fKSsCXQilLKArq6WS7XBLDu80Yz+nMqHVFDquwcVrQmGr28rg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.21.0.tgz", + "integrity": "sha512-zJ4hA+3b5tu8u7L58CCSI0A9N1vkfwPhWd/puGXwtZlsB5bTkwDNW/+JCU84+3QYmKpLi+XvHdmrlwUwDA6kqw==", + "cpu": [ + "s390x" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.21.0.tgz", + "integrity": "sha512-e2hrvElFIh6kW/UNBQK/kzqMNY5mO+67YtEh9OA65RM5IJXYTWiXjX6fjIiPaqOkBthYF1EqgiZ6OXKcQsM0hg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.21.0.tgz", + "integrity": "sha512-1vvmgDdUSebVGXWX2lIcgRebqfQSff0hMEkLJyakQ9JQUbLDkEaMsPTLOmyccyC6IJ/l3FZuJbmrBw/u0A0uCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.21.0.tgz", + "integrity": "sha512-s5oFkZ/hFcrlAyBTONFY1TWndfyre1wOMwU+6KCpm/iatybvrRgmZVM+vCFwxmC5ZhdlgfE0N4XorsDpi7/4XQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.21.0.tgz", + "integrity": "sha512-G9+TEqRnAA6nbpqyUqgTiopmnfgnMkR3kMukFBDsiyy23LZvUCpiUwjTRx6ezYCjJODXrh52rBR9oXvm+Fp5wg==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.21.0.tgz", + "integrity": "sha512-2jsCDZwtQvRhejHLfZ1JY6w6kEuEtfF9nzYsZxzSlNVKDX+DpsDJ+Rbjkm74nvg2rdx0gwBS+IMdvwJuq3S9pQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, "node_modules/@rushstack/eslint-patch": { "version": "1.6.1", "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.6.1.tgz", @@ -707,10 +932,13 @@ "dev": true }, "node_modules/@types/node": { - "version": "16.18.69", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.69.tgz", - "integrity": "sha512-AfDKv5fWd9XStaEuqFa6PYcM8FgTqxVMsP4BPk60emeB9YX+pp2P0zZ8nU1BQg8hyPGFrMt7MGMRMis8IrcPyg==", - "dev": true + "version": "18.19.45", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.45.tgz", + "integrity": "sha512-VZxPKNNhjKmaC1SUYowuXSRSMGyQGmQjvvA1xE4QZ0xce2kLtEhPDS+kqpCPBZYgqblCLQ2DAjSzmgCM5auvhA==", + "dev": true, + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/raf": { "version": "3.4.3", @@ -1965,9 +2193,9 @@ } }, "node_modules/esbuild": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.18.20.tgz", - "integrity": 
"sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", "dev": true, "hasInstallScript": true, "bin": { @@ -1977,28 +2205,29 @@ "node": ">=12" }, "optionalDependencies": { - "@esbuild/android-arm": "0.18.20", - "@esbuild/android-arm64": "0.18.20", - "@esbuild/android-x64": "0.18.20", - "@esbuild/darwin-arm64": "0.18.20", - "@esbuild/darwin-x64": "0.18.20", - "@esbuild/freebsd-arm64": "0.18.20", - "@esbuild/freebsd-x64": "0.18.20", - "@esbuild/linux-arm": "0.18.20", - "@esbuild/linux-arm64": "0.18.20", - "@esbuild/linux-ia32": "0.18.20", - "@esbuild/linux-loong64": "0.18.20", - "@esbuild/linux-mips64el": "0.18.20", - "@esbuild/linux-ppc64": "0.18.20", - "@esbuild/linux-riscv64": "0.18.20", - "@esbuild/linux-s390x": "0.18.20", - "@esbuild/linux-x64": "0.18.20", - "@esbuild/netbsd-x64": "0.18.20", - "@esbuild/openbsd-x64": "0.18.20", - "@esbuild/sunos-x64": "0.18.20", - "@esbuild/win32-arm64": "0.18.20", - "@esbuild/win32-ia32": "0.18.20", - "@esbuild/win32-x64": "0.18.20" + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" } }, "node_modules/escalade": { @@ -3780,9 +4009,9 @@ "optional": true }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" }, "node_modules/picomatch": { "version": "2.3.1", @@ -3868,9 +4097,9 @@ } }, "node_modules/postcss": { - "version": "8.4.32", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.32.tgz", - "integrity": "sha512-D/kj5JNu6oo2EIy+XL/26JEDTlIbB8hw85G8StOE6L74RQAVVP5rej6wxCNqyMbR4RkPfqvezVbPw81Ngd6Kcw==", + "version": "8.4.41", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.41.tgz", + "integrity": "sha512-TesUflQ0WKZqAvg52PWL6kHgLKP6xB6heTOdoYM0Wt2UHyxNa4K25EZZMgKns3BH1RLVbZCREPpLY0rhnNoHVQ==", "funding": [ { "type": "opencollective", @@ -3887,8 +4116,8 @@ ], "dependencies": { "nanoid": "^3.3.7", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" + "picocolors": "^1.0.1", + "source-map-js": "^1.2.0" }, "engines": { "node": "^10 || ^12 || >=14" @@ -4117,18 +4346,37 @@ } }, "node_modules/rollup": { - "version": "3.29.4", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.4.tgz", - "integrity": 
"sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==", + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.21.0.tgz", + "integrity": "sha512-vo+S/lfA2lMS7rZ2Qoubi6I5hwZwzXeUIctILZLbHI+laNtvhhOIon2S1JksA5UEDQ7l3vberd0fxK44lTYjbQ==", "dev": true, + "dependencies": { + "@types/estree": "1.0.5" + }, "bin": { "rollup": "dist/bin/rollup" }, "engines": { - "node": ">=14.18.0", + "node": ">=18.0.0", "npm": ">=8.0.0" }, "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.21.0", + "@rollup/rollup-android-arm64": "4.21.0", + "@rollup/rollup-darwin-arm64": "4.21.0", + "@rollup/rollup-darwin-x64": "4.21.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.21.0", + "@rollup/rollup-linux-arm-musleabihf": "4.21.0", + "@rollup/rollup-linux-arm64-gnu": "4.21.0", + "@rollup/rollup-linux-arm64-musl": "4.21.0", + "@rollup/rollup-linux-powerpc64le-gnu": "4.21.0", + "@rollup/rollup-linux-riscv64-gnu": "4.21.0", + "@rollup/rollup-linux-s390x-gnu": "4.21.0", + "@rollup/rollup-linux-x64-gnu": "4.21.0", + "@rollup/rollup-linux-x64-musl": "4.21.0", + "@rollup/rollup-win32-arm64-msvc": "4.21.0", + "@rollup/rollup-win32-ia32-msvc": "4.21.0", + "@rollup/rollup-win32-x64-msvc": "4.21.0", "fsevents": "~2.3.2" } }, @@ -4395,9 +4643,9 @@ } }, "node_modules/source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", "engines": { "node": ">=0.10.0" } @@ -4827,6 +5075,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "node_modules/unplugin": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/unplugin/-/unplugin-1.6.0.tgz", @@ -4966,32 +5220,33 @@ } }, "node_modules/vite": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.1.tgz", - "integrity": "sha512-AXXFaAJ8yebyqzoNB9fu2pHoo/nWX+xZlaRwoeYUxEqBO+Zj4msE5G+BhGBll9lYEKv9Hfks52PAF2X7qDYXQA==", + "version": "5.4.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.2.tgz", + "integrity": "sha512-dDrQTRHp5C1fTFzcSaMxjk6vdpKvT+2/mIdE07Gw2ykehT49O0z/VHS3zZ8iV/Gh8BJJKHWOe5RjaNrW5xf/GA==", "dev": true, "dependencies": { - "esbuild": "^0.18.10", - "postcss": "^8.4.27", - "rollup": "^3.27.1" + "esbuild": "^0.21.3", + "postcss": "^8.4.41", + "rollup": "^4.20.0" }, "bin": { "vite": "bin/vite.js" }, "engines": { - "node": "^14.18.0 || >=16.0.0" + "node": "^18.0.0 || >=20.0.0" }, "funding": { "url": "https://github.com/vitejs/vite?sponsor=1" }, "optionalDependencies": { - "fsevents": "~2.3.2" + "fsevents": "~2.3.3" }, "peerDependencies": { - "@types/node": ">= 14", + "@types/node": "^18.0.0 || >=20.0.0", "less": "*", "lightningcss": "^1.21.0", "sass": "*", + "sass-embedded": "*", "stylus": "*", "sugarss": "*", "terser": "^5.4.0" @@ -5009,6 +5264,9 @@ "sass": { "optional": true }, + "sass-embedded": { + "optional": true + }, "stylus": { "optional": true }, @@ -5084,6 +5342,11 @@ "resolved": 
"https://registry.npmjs.org/vue-router/-/vue-router-3.6.5.tgz", "integrity": "sha512-VYXZQLtjuvKxxcshuRAwjHnciqZVoXAjTjcqBTz4rKc8qih9g9pI3hbDjmqXaHdgL3v8pV6P8Z335XvHzESxLQ==" }, + "node_modules/vue-virtual-scroll-list": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/vue-virtual-scroll-list/-/vue-virtual-scroll-list-2.3.5.tgz", + "integrity": "sha512-YFK6u5yltqtAOfTBcij/KGAS2SoZvzbNIAf9qTULauPObEp53xj22tDuohrrM2vNkgoD5kejXICIUBt2Q4ZDqQ==" + }, "node_modules/vuetify": { "version": "2.7.1", "resolved": "https://registry.npmjs.org/vuetify/-/vuetify-2.7.1.tgz", diff --git a/photon-client/package.json b/photon-client/package.json index a2ab77c4ca..69a40a2991 100644 --- a/photon-client/package.json +++ b/photon-client/package.json @@ -2,6 +2,7 @@ "name": "photonclient", "version": "0.0.0", "private": true, + "type": "module", "scripts": { "dev": "vite", "build": "run-p build-only", @@ -23,11 +24,12 @@ "three": "^0.160.0", "vue": "^2.7.14", "vue-router": "^3.6.5", + "vue-virtual-scroll-list": "^2.3.5", "vuetify": "^2.7.1" }, "devDependencies": { "@rushstack/eslint-patch": "^1.3.2", - "@types/node": "^16.11.45", + "@types/node": "^18.19.45", "@types/three": "^0.160.0", "@vitejs/plugin-vue2": "^2.3.1", "@vue/eslint-config-prettier": "^9.0.0", @@ -43,6 +45,6 @@ "terser": "^5.14.2", "typescript": "^5.3.3", "unplugin-vue-components": "^0.26.0", - "vite": "^4.5.1" + "vite": "^5.4.2" } } diff --git a/photon-client/src/components/app/photon-camera-stream.vue b/photon-client/src/components/app/photon-camera-stream.vue index 34465e9389..2fa78fb9f9 100644 --- a/photon-client/src/components/app/photon-camera-stream.vue +++ b/photon-client/src/components/app/photon-camera-stream.vue @@ -57,13 +57,13 @@ const handleFullscreenRequest = () => { const mjpgStream: any = ref(null); onBeforeUnmount(() => { if (!mjpgStream.value) return; - mjpgStream.value["src"] = null; + mjpgStream.value["src"] = "//:0"; });