From 96d34e7b5e85a7eb1fad2f2eb4dca8847784ca2f Mon Sep 17 00:00:00 2001
From: Chris
Date: Thu, 5 Sep 2024 23:08:10 -0500
Subject: [PATCH] Code Block cleanup/commonization

---
 .../docs/apriltag-pipelines/multitag.md       |  7 +++-
 docs/source/docs/installation/networking.md   |  6 ++-
 .../docs/integration/advancedStrategies.md    |  2 +-
 .../programming/photonlib/controlling-led.md  |  8 +++-
 .../photonlib/driver-mode-pipeline-index.md   | 20 ++++++++--
 .../photonlib/getting-target-data.md          | 38 +++++++++----------
 .../photonlib/robot-pose-estimator.md         | 22 ++++++++---
 .../photonlib/using-target-data.md            | 36 +++++++++---------
 .../source/docs/simulation/simulation-java.md | 24 ++++++------
 9 files changed, 100 insertions(+), 63 deletions(-)

diff --git a/docs/source/docs/apriltag-pipelines/multitag.md b/docs/source/docs/apriltag-pipelines/multitag.md
index 102b36ce1f..e70c198be4 100644
--- a/docs/source/docs/apriltag-pipelines/multitag.md
+++ b/docs/source/docs/apriltag-pipelines/multitag.md
@@ -24,7 +24,7 @@ This multi-target pose estimate can be accessed using PhotonLib. We suggest usin
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      var result = camera.getLatestResult();
      if (result.getMultiTagResult().estimatedPose.isPresent) {
@@ -38,6 +38,11 @@ This multi-target pose estimate can be accessed using PhotonLib. We suggest usin
      if (result.MultiTagResult().result.isPresent) {
        frc::Transform3d fieldToCamera = result.MultiTagResult().result.best;
      }
+
+   .. code-block:: Python
+
+      # TODO
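+      # Untested sketch of a Python equivalent; assumes photonlibpy exposes
+      # the multi-tag result as fields rather than getters:
+      result = camera.getLatestResult()
+      if result.multiTagResult.estimatedPose.isPresent:
+          fieldToCamera = result.multiTagResult.estimatedPose.best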
+
 ```

 :::{note}
diff --git a/docs/source/docs/installation/networking.md b/docs/source/docs/installation/networking.md
index fd46ad3873..9c3594bd9b 100644
--- a/docs/source/docs/installation/networking.md
+++ b/docs/source/docs/installation/networking.md
@@ -44,13 +44,17 @@ If you would like to access your Ethernet-connected vision device from a compute
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      PortForwarder.add(5800, "photonvision.local", 5800);

    .. code-block:: C++

      wpi::PortForwarder::GetInstance().Add(5800, "photonvision.local", 5800);
+
+   .. code-block:: Python
+
+      # TODO
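+      # Untested sketch; assumes RobotPy's wpinet exposes PortForwarder with
+      # the same signature as the Java/C++ versions:
+      import wpinet
+      wpinet.PortForwarder.getInstance().add(5800, "photonvision.local", 5800)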
 ```

 :::{note}
diff --git a/docs/source/docs/integration/advancedStrategies.md b/docs/source/docs/integration/advancedStrategies.md
index 164d1756a4..589a10a3a5 100644
--- a/docs/source/docs/integration/advancedStrategies.md
+++ b/docs/source/docs/integration/advancedStrategies.md
@@ -39,7 +39,7 @@ A simple way to use a pose estimate is to activate robot functions automatically
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      Pose3d robotPose;
      boolean launcherSpinCmd;
diff --git a/docs/source/docs/programming/photonlib/controlling-led.md b/docs/source/docs/programming/photonlib/controlling-led.md
index 34649ddfd5..5997698171 100644
--- a/docs/source/docs/programming/photonlib/controlling-led.md
+++ b/docs/source/docs/programming/photonlib/controlling-led.md
@@ -4,13 +4,17 @@ You can control the vision LEDs of supported hardware via PhotonLib using the `s
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Blink the LEDs.
      camera.setLED(VisionLEDMode.kBlink);

-   .. code-block:: c++
+   .. code-block:: C++

      // Blink the LEDs.
      camera.SetLED(photonlib::VisionLEDMode::kBlink);
+
+   .. code-block:: Python
+
+      # TODO
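+      # Untested sketch; assumes photonlibpy mirrors the Java LED API
+      # (a setLEDMode() method and a VisionLEDMode enum):
+      # Blink the LEDs.
+      camera.setLEDMode(VisionLEDMode.kBlink)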
 ```
diff --git a/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.md b/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.md
index 2b9444dc29..2ae34447d9 100644
--- a/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.md
+++ b/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.md
@@ -9,7 +9,7 @@ You can use the `setDriverMode()`/`SetDriverMode()` (Java and C++ respectively)
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Set driver mode to on.
      camera.setDriverMode(true);
@@ -18,6 +18,10 @@ You can use the `setDriverMode()`/`SetDriverMode()` (Java and C++ respectively)

      // Set driver mode to on.
      camera.SetDriverMode(true);
+
+   .. code-block:: Python
+
+      # TODO
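+      # Untested sketch; photonlibpy's PhotonCamera exposes the same setter:
+      # Set driver mode to on.
+      camera.setDriverMode(True)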
 ```

 ## Setting the Pipeline Index

@@ -27,7 +31,7 @@ You can use the `setPipelineIndex()`/`SetPipelineIndex()` (Java and C++ respecti
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Change pipeline to 2
      camera.setPipelineIndex(2);
@@ -36,6 +40,10 @@ You can use the `setPipelineIndex()`/`SetPipelineIndex()` (Java and C++ respecti

      // Change pipeline to 2
      camera.SetPipelineIndex(2);
+
+   .. code-block:: Python
+
+      # TODO
 ```

 ## Getting the Pipeline Latency

@@ -44,15 +52,19 @@ You can also get the pipeline latency from a pipeline result using the `getLaten
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Get the pipeline latency.
      double latencySeconds = result.getLatencyMillis() / 1000.0;

-   .. code-block:: c++
+   .. code-block:: C++

      // Get the pipeline latency.
      units::second_t latency = result.GetLatency();
+
+   .. code-block:: Python
+
+      # TODO
 ```

 :::{note}
diff --git a/docs/source/docs/programming/photonlib/getting-target-data.md b/docs/source/docs/programming/photonlib/getting-target-data.md
index f22e837559..ceea28a898 100644
--- a/docs/source/docs/programming/photonlib/getting-target-data.md
+++ b/docs/source/docs/programming/photonlib/getting-target-data.md
@@ -20,7 +20,7 @@ The `PhotonCamera` class has two constructors: one that takes a `NetworkTable` a
      :language: c++
      :lines: 42-43

-   .. code-block:: python
+   .. code-block:: Python

      # Change this to match the name of your camera as shown in the web ui
      self.camera = PhotonCamera("your_camera_name_here")
@@ -51,7 +51,7 @@ Use the `getLatestResult()`/`GetLatestResult()` (Java and C++ respectively) to o
      :language: c++
      :lines: 35-36

-   .. code-block:: python
+   .. code-block:: Python

      # Query the latest result from PhotonVision
      result = self.camera.getLatestResult()
@@ -69,17 +69,17 @@ Each pipeline result has a `hasTargets()`/`HasTargets()` (Java and C++ respectiv
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Check if the latest result has any targets.
      boolean hasTargets = result.hasTargets();

-   .. code-block:: c++
+   .. code-block:: C++

      // Check if the latest result has any targets.
      bool hasTargets = result.HasTargets();

-   .. code-block:: python
+   .. code-block:: Python

      # Check if the latest result has any targets.
      hasTargets = result.hasTargets()
@@ -99,17 +99,17 @@ You can get a list of tracked targets using the `getTargets()`/`GetTargets()` (J
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Get a list of currently tracked targets.
      List<PhotonTrackedTarget> targets = result.getTargets();

-   .. code-block:: c++
+   .. code-block:: C++

      // Get a list of currently tracked targets.
      wpi::ArrayRef<photonlib::PhotonTrackedTarget> targets = result.GetTargets();

-   .. code-block:: python
+   .. code-block:: Python

      # Get a list of currently tracked targets.
      targets = result.getTargets()
@@ -121,18 +121,18 @@ You can get the {ref}`best target
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Get information from target.
      double yaw = target.getYaw();
      double pitch = target.getPitch();
      double area = target.getArea();
      double skew = target.getSkew();
      Transform2d pose = target.getCameraToTarget();
      List<TargetCorner> corners = target.getCorners();

-   .. code-block:: c++
+   .. code-block:: C++

      // Get information from target.
      double yaw = target.GetYaw();
@@ -169,7 +169,7 @@ You can get the {ref}`best target
      frc::Transform2d pose = target.GetCameraToTarget();
      wpi::SmallVector<std::pair<double, double>, 4> corners = target.GetCorners();

-   .. code-block:: python
+   .. code-block:: Python

      # Get information from target.
      yaw = target.getYaw()
@@ -193,7 +193,7 @@ All of the data above (**except skew**) is available when using AprilTags.
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Get information from target.
      int targetID = target.getFiducialId();
@@ -201,7 +201,7 @@ All of the data above (**except skew**) is available when using AprilTags.
      double poseAmbiguity = target.getPoseAmbiguity();
      Transform3d bestCameraToTarget = target.getBestCameraToTarget();
      Transform3d alternateCameraToTarget = target.getAlternateCameraToTarget();

-   .. code-block:: c++
+   .. code-block:: C++

      // Get information from target.
      int targetID = target.GetFiducialId();
@@ -209,7 +209,7 @@ All of the data above (**except skew**) is available when using AprilTags.
      double poseAmbiguity = target.GetPoseAmbiguity();
      frc::Transform3d bestCameraToTarget = target.GetBestCameraToTarget();
      frc::Transform3d alternateCameraToTarget = target.GetAlternateCameraToTarget();

-   .. code-block:: python
+   .. code-block:: Python

      # Get information from target.
      targetID = target.getFiducialId()
@@ -227,7 +227,7 @@ Images are stored within the PhotonVision configuration directory. Running the "
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Capture pre-process camera stream image
      camera.takeInputSnapshot();
@@ -243,7 +243,7 @@ Images are stored within the PhotonVision configuration directory. Running the "
      // Capture post-process camera stream image
      camera.TakeOutputSnapshot();

-   .. code-block:: python
+   .. code-block:: Python

      # Capture pre-process camera stream image
      camera.takeInputSnapshot()
diff --git a/docs/source/docs/programming/photonlib/robot-pose-estimator.md b/docs/source/docs/programming/photonlib/robot-pose-estimator.md
index 2e288f3fc3..b878be2710 100644
--- a/docs/source/docs/programming/photonlib/robot-pose-estimator.md
+++ b/docs/source/docs/programming/photonlib/robot-pose-estimator.md
@@ -14,16 +14,20 @@ The API documentation can be found in here: [Java](https://github.wpilib.org/all
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // The field from AprilTagFields will be different depending on the game.
      AprilTagFieldLayout aprilTagFieldLayout = AprilTagFields.k2024Crescendo.loadAprilTagLayoutField();

-   .. code-block:: c++
+   .. code-block:: C++

      // The parameter for LoadAprilTagLayoutField will be different depending on the game.
      frc::AprilTagFieldLayout aprilTagFieldLayout = frc::LoadAprilTagLayoutField(frc::AprilTagField::k2024Crescendo);

+   .. code-block:: Python
+
+      # TODO
+
 ```

 ## Creating a `PhotonPoseEstimator`

@@ -46,7 +50,7 @@ The PhotonPoseEstimator has a constructor that takes an `AprilTagFieldLayout` (s
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      //Forward Camera
      cam = new PhotonCamera("testCamera");
      robotToCam = new Transform3d(new Translation3d(0.5, 0.0, 0.5), new Rotation3d(0,0,0)); //Cam mounted facing forward, half a meter forward of center, half a meter up from center.

      // Construct PhotonPoseEstimator
      PhotonPoseEstimator photonPoseEstimator = new PhotonPoseEstimator(aprilTagFieldLayout, PoseStrategy.CLOSEST_TO_REFERENCE_POSE, cam, robotToCam);

-   .. code-block:: c++
+   .. code-block:: C++

      // Forward Camera
      std::shared_ptr<photonlib::PhotonCamera> cameraOne =
          std::make_shared<photonlib::PhotonCamera>("testCamera");
      // Camera is mounted facing forward, half a meter forward of center, half a meter up from center.
      frc::Transform3d robotToCam =
          frc::Transform3d(frc::Translation3d(0.5_m, 0_m, 0.5_m),
                           frc::Rotation3d(0_rad, 0_rad, 0_rad));

      // ... Add other cameras here

      // Assemble the list of cameras & mount locations
      std::vector<std::pair<std::shared_ptr<photonlib::PhotonCamera>, frc::Transform3d>> cameras;
      cameras.push_back(std::make_pair(cameraOne, robotToCam));

      photonlib::RobotPoseEstimator estimator(
          aprilTags, photonlib::CLOSEST_TO_REFERENCE_POSE, cameras);
+
+   .. code-block:: Python
+
+      # TODO
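+      # Untested sketch; assumes photonlibpy provides PhotonPoseEstimator and
+      # PoseStrategy mirroring the Java constructor:
+      from photonlibpy.photonCamera import PhotonCamera
+      from photonlibpy.photonPoseEstimator import PhotonPoseEstimator, PoseStrategy
+      from wpimath.geometry import Transform3d, Translation3d, Rotation3d
+
+      # Forward Camera
+      cam = PhotonCamera("testCamera")
+      robotToCam = Transform3d(Translation3d(0.5, 0.0, 0.5), Rotation3d(0.0, 0.0, 0.0))
+
+      # Construct PhotonPoseEstimator
+      photonPoseEstimator = PhotonPoseEstimator(aprilTagFieldLayout, PoseStrategy.CLOSEST_TO_REFERENCE_POSE, cam, robotToCam)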
 ```

 ## Using a `PhotonPoseEstimator`

@@ -88,7 +96,7 @@ Calling `update()` on your `PhotonPoseEstimator` will return an `EstimatedRobotP
      :language: java
      :lines: 85-88

-   .. code-block:: c++
+   .. code-block:: C++

      std::pair<frc::Pose2d, units::millisecond_t> getEstimatedGlobalPose(
          frc::Pose3d prevEstimatedRobotPose) {
        robotPoseEstimator.SetReferencePose(prevEstimatedRobotPose);
        units::millisecond_t currentTime = frc::Timer::GetFPGATimestamp();
        auto result = robotPoseEstimator.Update();
        if (result.second) {
          return std::make_pair<>(result.first.ToPose2d(),
                                  currentTime - result.second);
        } else {
          return std::make_pair(frc::Pose2d(), 0_ms);
        }
      }
+
+   .. code-block:: Python
+
+      # TODO
 ```

 You should be updating your [drivetrain pose estimator](https://docs.wpilib.org/en/latest/docs/software/advanced-controls/state-space/state-space-pose-estimators.html) with the result from the `RobotPoseEstimator` every loop using `addVisionMeasurement()`. TODO: add example note
diff --git a/docs/source/docs/programming/photonlib/using-target-data.md b/docs/source/docs/programming/photonlib/using-target-data.md
index ed27689e32..6a917cdc9b 100644
--- a/docs/source/docs/programming/photonlib/using-target-data.md
+++ b/docs/source/docs/programming/photonlib/using-target-data.md
@@ -8,15 +8,15 @@ A `PhotonUtils` class with helpful common calculations is included within `Photo
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Calculate robot's field relative pose
      Pose3d robotPose = PhotonUtils.estimateFieldToRobotAprilTag(target.getBestCameraToTarget(), aprilTagFieldLayout.getTagPose(target.getFiducialId()), cameraToRobot);

-   .. code-block:: c++
+   .. code-block:: C++

      //TODO

-   .. code-block:: python
+   .. code-block:: Python

      # TODO
 ```

@@ -27,19 +27,19 @@ You can get your robot's `Pose2D` on the field using various camera data, target
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Calculate robot's field relative pose
      Pose2d robotPose = PhotonUtils.estimateFieldToRobot(
        kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, Rotation2d.fromDegrees(-target.getYaw()), gyro.getRotation2d(), targetPose, cameraToRobot);

-   .. code-block:: c++
+   .. code-block:: C++

      // Calculate robot's field relative pose
      frc::Pose2d robotPose = photonlib::EstimateFieldToRobot(
        kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, frc::Rotation2d(units::degree_t(-target.GetYaw())), gyro.GetRotation2d(), targetPose, cameraToRobot);

-   .. code-block:: python
+   .. code-block:: Python

      # TODO

@@ -52,15 +52,15 @@ If your camera is at a fixed height on your robot and the height of the target i
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // TODO

-   .. code-block:: c++
+   .. code-block:: C++

      // TODO

-   .. code-block:: python
+   .. code-block:: Python

      # TODO

@@ -76,15 +76,15 @@ The C++ version of PhotonLib uses the Units library. For more information, see [
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      double distanceToTarget = PhotonUtils.getDistanceToPose(robotPose, targetPose);

-   .. code-block:: c++
+   .. code-block:: C++

      //TODO

-   .. code-block:: python
+   .. code-block:: Python

      # TODO
 ```

@@ -95,19 +95,19 @@ You can get a [translation](https://docs.wpilib.org/en/latest/docs/software/adva
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Calculate a translation from the camera to the target.
      Translation2d translation = PhotonUtils.estimateCameraToTargetTranslation(
        distanceMeters, Rotation2d.fromDegrees(-target.getYaw()));

-   .. code-block:: c++
+   .. code-block:: C++

      // Calculate a translation from the camera to the target.
      frc::Translation2d translation = photonlib::PhotonUtils::EstimateCameraToTargetTranslation(
        distance, frc::Rotation2d(units::degree_t(-target.GetYaw())));

-   .. code-block:: python
+   .. code-block:: Python

      # TODO

@@ -123,14 +123,14 @@ We are negating the yaw from the camera from CV (computer vision) conventions to
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      Rotation2d targetYaw = PhotonUtils.getYawToPose(robotPose, targetPose);

-   .. code-block:: c++
+   .. code-block:: C++

      //TODO

-   .. code-block:: python
+   .. code-block:: Python

      # TODO
 ```
diff --git a/docs/source/docs/simulation/simulation-java.md b/docs/source/docs/simulation/simulation-java.md
index f7f6c10be1..7b4f99da97 100644
--- a/docs/source/docs/simulation/simulation-java.md
+++ b/docs/source/docs/simulation/simulation-java.md
@@ -55,7 +55,7 @@ A `VisionSystemSim` represents the simulated world for one or more cameras, and
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // A vision system sim labelled as "main" in NetworkTables
      VisionSystemSim visionSim = new VisionSystemSim("main");
@@ -68,7 +68,7 @@ Vision targets require a `TargetModel`, which describes the shape of the target.
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // A 0.5 x 0.25 meter rectangular target
      TargetModel targetModel = new TargetModel(0.5, 0.25);
@@ -79,7 +79,7 @@ These `TargetModel` are paired with a target pose to create a `VisionTargetSim`.
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // The pose of where the target is on the field.
      // Its rotation determines where "forward" or the target x-axis points.
@@ -101,7 +101,7 @@ For convenience, an `AprilTagFieldLayout` can also be added to automatically cre
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // The layout of AprilTags which we want to add to the vision system
      AprilTagFieldLayout tagLayout = AprilTagFieldLayout.loadFromResource(AprilTagFields.k2024Crescendo.m_resourceFile);
@@ -122,7 +122,7 @@ Before adding a simulated camera, we need to define its properties. This is done
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // The simulated camera properties
      SimCameraProperties cameraProp = new SimCameraProperties();
@@ -133,7 +133,7 @@ By default, this will create a 960 x 720 resolution camera with a 90 degree diag
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // A 640 x 480 camera with a 100 degree diagonal FOV.
      cameraProp.setCalibration(640, 480, Rotation2d.fromDegrees(100));
@@ -151,7 +151,7 @@ These properties are used in a `PhotonCameraSim`, which handles generating captu
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // The PhotonCamera used in the real robot code.
      PhotonCamera camera = new PhotonCamera("cameraName");
@@ -165,7 +165,7 @@ The `PhotonCameraSim` can now be added to the `VisionSystemSim`. We have to defi
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Our camera is mounted 0.1 meters forward and 0.5 meters up from the robot pose,
      // (Robot pose is considered the center of rotation at the floor level, or Z = 0)
@@ -187,7 +187,7 @@ If the camera is mounted on a mobile mechanism (like a turret) this transform ca
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // The turret the camera is mounted on is rotated 5 degrees
      Rotation3d turretRotation = new Rotation3d(0, 0, Math.toRadians(5));
@@ -204,7 +204,7 @@ To update the `VisionSystemSim`, we simply have to pass in the simulated robot p
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Update with the simulated drivetrain pose. This should be called every loop in simulation.
      visionSim.update(robotPoseMeters);
@@ -219,7 +219,7 @@ Each `VisionSystemSim` has its own built-in `Field2d` for displaying object pose
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Get the built-in Field2d used by this VisionSystemSim
      visionSim.getDebugField();
@@ -234,7 +234,7 @@ A `PhotonCameraSim` can also draw and publish generated camera frames to a MJPEG
 ```{eval-rst}
 .. tab-set-code::

-   .. code-block:: java
+   .. code-block:: Java

      // Enable the raw and processed streams. These are enabled by default.
      cameraSim.enableRawStream(true);