From 5a5a64657b73c3c53eab4378d1605074df4d1eca Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 10 May 2024 11:07:22 -0700 Subject: [PATCH 01/10] Use readqueue for results and change ownership --- photon-lib/build.gradle | 2 + photon-lib/py/photonlibpy/photonCamera.py | 34 ++ .../java/org/photonvision/PhotonCamera.java | 54 ++- .../org/photonvision/PhotonPoseEstimator.java | 44 +- .../main/native/cpp/photon/PhotonCamera.cpp | 43 +- .../native/cpp/photon/PhotonPoseEstimator.cpp | 39 +- .../cpp/photon/simulation/PhotonCameraSim.cpp | 384 ++++++++++++++++++ .../photon/simulation/SimCameraProperties.cpp | 219 ++++++++++ .../main/native/include/photon/PhotonCamera.h | 20 +- .../include/photon/PhotonPoseEstimator.h | 55 +-- .../photon/simulation/PhotonCameraSim.h | 384 ++---------------- .../photon/simulation/SimCameraProperties.h | 190 +-------- .../photon/simulation/SimPhotonCamera.h | 137 ------- .../photon/simulation/SimVisionSystem.h | 226 ----------- .../photon/simulation/SimVisionTarget.h | 65 --- .../photonvision/PhotonPoseEstimatorTest.java | 33 +- .../estimation/ApriltagWorkbenchTest.java | 10 +- .../native/cpp/PhotonPoseEstimatorTest.cpp | 105 +++-- .../test/native/cpp/SimVisionSystemTest.cpp | 358 ---------------- .../test/native/cpp/VisionSystemSimTest.cpp | 19 +- .../networktables/PacketSubscriber.java | 65 ++- .../include/photon/estimation/OpenCVHelp.h | 2 + .../photon/estimation/VisionEstimation.h | 1 + .../src/main/include/PhotonCameraWrapper.h | 12 +- .../src/main/java/frc/robot/Vision.java | 31 +- .../subsystems/drivetrain/SwerveDriveSim.java | 9 +- shared/config.gradle | 3 + 27 files changed, 984 insertions(+), 1560 deletions(-) create mode 100644 photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp create mode 100644 photon-lib/src/main/native/cpp/photon/simulation/SimCameraProperties.cpp delete mode 100644 photon-lib/src/main/native/include/photon/simulation/SimPhotonCamera.h delete mode 100644 photon-lib/src/main/native/include/photon/simulation/SimVisionSystem.h delete mode 100644 photon-lib/src/main/native/include/photon/simulation/SimVisionTarget.h delete mode 100644 photon-lib/src/test/native/cpp/SimVisionSystemTest.cpp diff --git a/photon-lib/build.gradle b/photon-lib/build.gradle index 9e76b6cceb..90cb4365bb 100644 --- a/photon-lib/build.gradle +++ b/photon-lib/build.gradle @@ -53,6 +53,8 @@ model { nativeUtils.useRequiredLibrary(it, "wpilib_shared") nativeUtils.useRequiredLibrary(it, "apriltag_shared") nativeUtils.useRequiredLibrary(it, "opencv_shared") + nativeUtils.useRequiredLibrary(it, "cscore_shared") + nativeUtils.useRequiredLibrary(it, "cameraserver_shared") } } testSuites { diff --git a/photon-lib/py/photonlibpy/photonCamera.py b/photon-lib/py/photonlibpy/photonCamera.py index 8be306aa5b..151d668816 100644 --- a/photon-lib/py/photonlibpy/photonCamera.py +++ b/photon-lib/py/photonlibpy/photonCamera.py @@ -1,4 +1,5 @@ from enum import Enum +from typing import List import ntcore from wpilib import Timer import wpilib @@ -75,6 +76,39 @@ def __init__(self, cameraName: str): self._prevHeartbeat = 0 self._prevHeartbeatChangeTime = Timer.getFPGATimestamp() + def getAllUnreadResults(self) -> List[PhotonPipelineResult]: + """ + The list of pipeline results sent by PhotonVision since the last call to getAllUnreadResults(). + Calling this function clears the internal FIFO queue, and multiple calls to + getAllUnreadResults() will return different (potentially empty) result arrays. Be careful to + call this exactly ONCE per loop of your robot code! 
FIFO depth is limited to 20 changes, so + make sure to call this frequently enough to avoid old results being discarded, too! + """ + + self._versionCheck() + + changes = self._rawBytesEntry.readQueue() + + ret = [] + + for change in changes: + byteList = change.value + timestamp = change.time + + if len(byteList) < 1: + pass + else: + newResult = PhotonPipelineResult() + pkt = Packet(byteList) + newResult.populateFromPacket(pkt) + # NT4 allows us to correct the timestamp based on when the message was sent + newResult.setTimestampSeconds( + timestamp / 1e6 - newResult.getLatencyMillis() / 1e3 + ) + ret.append(newResult) + + return ret + def getLatestResult(self) -> PhotonPipelineResult: self._versionCheck() diff --git a/photon-lib/src/main/java/org/photonvision/PhotonCamera.java b/photon-lib/src/main/java/org/photonvision/PhotonCamera.java index 6dbb6811e2..6f8e853b8e 100644 --- a/photon-lib/src/main/java/org/photonvision/PhotonCamera.java +++ b/photon-lib/src/main/java/org/photonvision/PhotonCamera.java @@ -45,6 +45,8 @@ import edu.wpi.first.networktables.StringSubscriber; import edu.wpi.first.wpilibj.DriverStation; import edu.wpi.first.wpilibj.Timer; +import java.util.ArrayList; +import java.util.List; import java.util.Optional; import java.util.Set; import org.photonvision.common.hardware.VisionLEDMode; @@ -131,10 +133,12 @@ public PhotonCamera(NetworkTableInstance instance, String cameraName) { cameraTable .getRawTopic("rawBytes") .subscribe( - "rawBytes", new byte[] {}, PubSubOption.periodic(0.01), PubSubOption.sendAll(true)); - resultSubscriber = - new PacketSubscriber<>( - rawBytesEntry, PhotonPipelineResult.serde, new PhotonPipelineResult()); + "rawBytes", + new byte[] {}, + PubSubOption.periodic(0.01), + PubSubOption.sendAll(true), + PubSubOption.pollStorage(20)); + resultSubscriber = new PacketSubscriber<>(rawBytesEntry, PhotonPipelineResult.serde); driverModePublisher = cameraTable.getBooleanTopic("driverModeRequest").publish(); driverModeSubscriber = cameraTable.getBooleanTopic("driverMode").subscribe(false); inputSaveImgEntry = cameraTable.getIntegerTopic("inputSaveImgCmd").getEntry(0); @@ -170,22 +174,50 @@ public PhotonCamera(String cameraName) { } /** - * Returns the latest pipeline result. + * The list of pipeline results sent by PhotonVision since the last call to getAllUnreadResults(). + * Calling this function clears the internal FIFO queue, and multiple calls to + * getAllUnreadResults() will return different (potentially empty) result arrays. Be careful to + * call this exactly ONCE per loop of your robot code! FIFO depth is limited to 20 changes, so + * make sure to call this frequently enough to avoid old results being discarded, too! + */ + public List getAllUnreadResults() { + List ret = new ArrayList<>(); + + var changes = resultSubscriber.getAllChanges(); + + for (var c : changes) { + var result = c.value; + result.setTimestampSeconds((c.timestamp / 1e6) - (result.getLatencyMillis() / 1e3)); + ret.add(result); + } + + return ret; + } + + /** + * Returns the latest pipeline result. This is simply the most recent result recieved via NT. + * Calling this multiple times will always return the most recent result. * - * @return The latest pipeline result. + *

Replaced by {@link #getAllUnreadResults()} over getLatestResult, as this function can miss + * results, or provide duplicate ones! */ + @Deprecated(since = "2024", forRemoval = true) public PhotonPipelineResult getLatestResult() { verifyVersion(); var ret = resultSubscriber.get(); - // Set the timestamp of the result. + if (ret.timestamp == 0) return new PhotonPipelineResult(); + + var result = ret.value; + + // Set the timestamp of the result. Since PacketSubscriber doesn't realize that the result + // contains a thing with time knowledge, set it here. // getLatestChange returns in microseconds, so we divide by 1e6 to convert to seconds. - ret.setTimestampSeconds( - (resultSubscriber.subscriber.getLastChange() / 1e6) - ret.getLatencyMillis() / 1e3); + // TODO: NT4 time sync is Not To Be Trusted, we should do something else? + result.setTimestampSeconds((ret.timestamp / 1e6) - (result.getLatencyMillis() / 1e3)); - // Return result. - return ret; + return result; } /** diff --git a/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java b/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java index 6b217c9b73..f4d125b212 100644 --- a/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java +++ b/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java @@ -90,7 +90,6 @@ public enum PoseStrategy { private TargetModel tagModel = TargetModel.kAprilTag16h5; private PoseStrategy primaryStrategy; private PoseStrategy multiTagFallbackStrategy = PoseStrategy.LOWEST_AMBIGUITY; - private final PhotonCamera camera; private Transform3d robotToCamera; private Pose3d lastPose; @@ -107,31 +106,21 @@ public enum PoseStrategy { * Coordinate System. Note that setting the origin of this layout object will affect the * results from this class. * @param strategy The strategy it should use to determine the best pose. - * @param camera PhotonCamera * @param robotToCamera Transform3d from the center of the robot to the camera mount position (ie, * robot ➔ camera) in the Robot * Coordinate System. */ public PhotonPoseEstimator( - AprilTagFieldLayout fieldTags, - PoseStrategy strategy, - PhotonCamera camera, - Transform3d robotToCamera) { + AprilTagFieldLayout fieldTags, PoseStrategy strategy, Transform3d robotToCamera) { this.fieldTags = fieldTags; this.primaryStrategy = strategy; - this.camera = camera; this.robotToCamera = robotToCamera; HAL.report(tResourceType.kResourceType_PhotonPoseEstimator, InstanceCount); InstanceCount++; } - public PhotonPoseEstimator( - AprilTagFieldLayout fieldTags, PoseStrategy strategy, Transform3d robotToCamera) { - this(fieldTags, strategy, null, robotToCamera); - } - /** Invalidates the pose cache. */ private void invalidatePoseCache() { poseCacheTimestampSeconds = -1; @@ -288,36 +277,14 @@ public void setRobotToCameraTransform(Transform3d robotToCamera) { } /** - * Poll data from the configured cameras and update the estimated position of the robot. Returns - * empty if: - * - *

    - *
  • New data has not been received since the last call to {@code update()}. - *
  • No targets were found from the camera - *
  • There is no camera set - *
- * - * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to - * create the estimate. - */ - public Optional update() { - if (camera == null) { - DriverStation.reportError("[PhotonPoseEstimator] Missing camera!", false); - return Optional.empty(); - } - - PhotonPipelineResult cameraResult = camera.getLatestResult(); - - return update(cameraResult, camera.getCameraMatrix(), camera.getDistCoeffs()); - } - - /** - * Updates the estimated position of the robot. Returns empty if: + * Updates the estimated position of the robot, assuming no camera calibration is required for the + * selected strategy. Returns empty if: * *
    * <ul>
    *   <li>The timestamp of the provided pipeline result is the same as in the previous call to
    *       {@code update()}.
    *   <li>No targets were found in the pipeline results.
+   *   <li>Strategy is multi-tag on coprocessor
    * </ul>
* * @param cameraResult The latest pipeline result from the camera @@ -325,8 +292,7 @@ public Optional update() { * create the estimate. */ public Optional update(PhotonPipelineResult cameraResult) { - if (camera == null) return update(cameraResult, Optional.empty(), Optional.empty()); - return update(cameraResult, camera.getCameraMatrix(), camera.getDistCoeffs()); + return update(cameraResult, Optional.empty(), Optional.empty()); } /** diff --git a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp index 4a0b34249e..11c41c921e 100644 --- a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp +++ b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp @@ -66,7 +66,9 @@ PhotonCamera::PhotonCamera(nt::NetworkTableInstance instance, rootTable(mainTable->GetSubTable(cameraName)), rawBytesEntry( rootTable->GetRawTopic("rawBytes") - .Subscribe("rawBytes", {}, {.periodic = 0.01, .sendAll = true})), + .Subscribe( + "rawBytes", {}, + {.pollStorage = 20, .periodic = 0.01, .sendAll = true})), inputSaveImgEntry( rootTable->GetIntegerTopic("inputSaveImgCmd").Publish()), inputSaveImgSubscriber( @@ -102,15 +104,15 @@ PhotonCamera::PhotonCamera(const std::string_view cameraName) PhotonPipelineResult PhotonCamera::GetLatestResult() { if (test) { - return testResult; + if (testResult.size()) + return testResult.back(); + else + return PhotonPipelineResult{}; } // Prints warning if not connected VerifyVersion(); - // Clear the current packet. - packet.Clear(); - // Create the new result; PhotonPipelineResult result; @@ -128,6 +130,37 @@ PhotonPipelineResult PhotonCamera::GetLatestResult() { return result; } +std::vector PhotonCamera::GetAllUnreadResults() { + if (test) { + return testResult; + } + + // Prints warning if not connected + VerifyVersion(); + + const auto changes = rawBytesEntry.ReadQueue(); + + // Create the new result list -- these will be updated in-place + std::vector ret(changes.size()); + + for (size_t i = 0; i < changes.size(); i++) { + const nt::Timestamped>& value = changes[i]; + + if (!value.value.size() || value.time == 0) { + continue; + } + + // Fill the packet with latest data and populate result. 
+ photon::Packet packet{value.value}; + + PhotonPipelineResult& result = ret[i]; + packet >> result; + result.SetTimestamp(units::microsecond_t(value.time) - result.GetLatency()); + } + + return ret; +} + void PhotonCamera::SetDriverMode(bool driverMode) { driverModePublisher.Set(driverMode); } diff --git a/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp b/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp index d14394b999..77852de6c0 100644 --- a/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp +++ b/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp @@ -66,22 +66,6 @@ PhotonPoseEstimator::PhotonPoseEstimator(frc::AprilTagFieldLayout tags, frc::Transform3d robotToCamera) : aprilTags(tags), strategy(strat), - camera(nullptr), - m_robotToCamera(robotToCamera), - lastPose(frc::Pose3d()), - referencePose(frc::Pose3d()), - poseCacheTimestamp(-1_s) { - HAL_Report(HALUsageReporting::kResourceType_PhotonPoseEstimator, - InstanceCount); - InstanceCount++; -} - -PhotonPoseEstimator::PhotonPoseEstimator(frc::AprilTagFieldLayout tags, - PoseStrategy strat, PhotonCamera&& cam, - frc::Transform3d robotToCamera) - : aprilTags(tags), - strategy(strat), - camera(std::make_shared(std::move(cam))), m_robotToCamera(robotToCamera), lastPose(frc::Pose3d()), referencePose(frc::Pose3d()), @@ -106,25 +90,6 @@ void PhotonPoseEstimator::SetMultiTagFallbackStrategy(PoseStrategy strategy) { multiTagFallbackStrategy = strategy; } -std::optional PhotonPoseEstimator::Update() { - if (!camera) { - FRC_ReportError(frc::warn::Warning, "[PhotonPoseEstimator] Missing camera!", - ""); - return std::nullopt; - } - auto result = camera->GetLatestResult(); - return Update(result, camera->GetCameraMatrix(), camera->GetDistCoeffs()); -} - -std::optional PhotonPoseEstimator::Update( - const PhotonPipelineResult& result) { - // If camera is null, best we can do is pass null calibration data - if (!camera) { - return Update(result, std::nullopt, std::nullopt, this->strategy); - } - return Update(result, camera->GetCameraMatrix(), camera->GetDistCoeffs()); -} - std::optional PhotonPoseEstimator::Update( const PhotonPipelineResult& result, std::optional cameraMatrixData, std::optional cameraDistCoeffs) { @@ -399,6 +364,10 @@ std::optional PhotonPoseEstimator::MultiTagOnRioStrategy( } if (!camMat || !distCoeffs) { + FRC_ReportError(frc::warn::Warning, + "No camera calibration data provided to " + "PhotonPoseEstimator::MultiTagOnRioStrategy!", + ""); return Update(result, std::nullopt, std::nullopt, this->multiTagFallbackStrategy); } diff --git a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp new file mode 100644 index 0000000000..0b10023ec6 --- /dev/null +++ b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp @@ -0,0 +1,384 @@ +/* + * MIT License + * + * Copyright (c) PhotonVision + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "photon/simulation/PhotonCameraSim.h" + +namespace photon { +PhotonCameraSim::PhotonCameraSim(PhotonCamera* camera) + : PhotonCameraSim(camera, photon::SimCameraProperties::PERFECT_90DEG()) {} +PhotonCameraSim::PhotonCameraSim(PhotonCamera* camera, + const SimCameraProperties& props) + : prop(props), cam(camera) { + SetMinTargetAreaPixels(kDefaultMinAreaPx); + videoSimRaw = + frc::CameraServer::PutVideo(std::string{camera->GetCameraName()} + "-raw", + prop.GetResWidth(), prop.GetResHeight()); + videoSimRaw.SetPixelFormat(cs::VideoMode::PixelFormat::kGray); + videoSimProcessed = frc::CameraServer::PutVideo( + std::string{camera->GetCameraName()} + "-processed", prop.GetResWidth(), + prop.GetResHeight()); + ts.subTable = cam->GetCameraTable(); + ts.UpdateEntries(); +} +PhotonCameraSim::PhotonCameraSim(PhotonCamera* camera, + const SimCameraProperties& props, + double minTargetAreaPercent, + units::meter_t maxSightRange) + : PhotonCameraSim(camera, props) { + this->minTargetAreaPercent = minTargetAreaPercent; + this->maxSightRange = maxSightRange; +} + +bool PhotonCameraSim::CanSeeTargetPose(const frc::Pose3d& camPose, + const VisionTargetSim& target) { + CameraTargetRelation rel{camPose, target.GetPose()}; + return ((units::math::abs(rel.camToTargYaw.Degrees()) < + prop.GetHorizFOV().Degrees() / 2.0) && + (units::math::abs(rel.camToTargPitch.Degrees()) < + prop.GetVertFOV().Degrees() / 2.0) && + (!target.GetModel().GetIsPlanar() || + units::math::abs(rel.targToCamAngle.Degrees()) < 90_deg) && + (rel.camToTarg.Translation().Norm() <= maxSightRange)); +} +bool PhotonCameraSim::CanSeeCorner(const std::vector& points) { + for (const auto& pt : points) { + if (std::clamp(pt.x, 0, prop.GetResWidth()) != pt.x || + std::clamp(pt.y, 0, prop.GetResHeight()) != pt.y) { + return false; + } + } + return true; +} +std::optional PhotonCameraSim::ConsumeNextEntryTime() { + uint64_t now = wpi::Now(); + uint64_t timestamp{}; + int iter = 0; + while (now >= nextNTEntryTime) { + timestamp = nextNTEntryTime; + uint64_t frameTime = prop.EstSecUntilNextFrame() + .convert() + .to(); + nextNTEntryTime += frameTime; + + if (iter++ > 50) { + timestamp = now; + nextNTEntryTime = now + frameTime; + break; + } + } + + if (timestamp != 0) { + return timestamp; + } else { + return std::nullopt; + } +} +PhotonPipelineResult PhotonCameraSim::Process( + units::second_t latency, const frc::Pose3d& cameraPose, + std::vector targets) { + std::sort(targets.begin(), targets.end(), + [cameraPose](const VisionTargetSim& t1, const VisionTargetSim& t2) { + units::meter_t dist1 = + t1.GetPose().Translation().Distance(cameraPose.Translation()); + units::meter_t dist2 = + t2.GetPose().Translation().Distance(cameraPose.Translation()); + return dist1 > dist2; + }); + + std::vector>> + visibleTgts{}; + std::vector detectableTgts{}; + RotTrlTransform3d camRt = RotTrlTransform3d::MakeRelativeTo(cameraPose); + + VideoSimUtil::UpdateVideoProp(videoSimRaw, prop); + VideoSimUtil::UpdateVideoProp(videoSimProcessed, prop); + cv::Size 
videoFrameSize{prop.GetResWidth(), prop.GetResHeight()}; + cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1); + blankFrame.assignTo(videoSimFrameRaw); + + for (const auto& tgt : targets) { + if (!CanSeeTargetPose(cameraPose, tgt)) { + continue; + } + + std::vector fieldCorners = tgt.GetFieldVertices(); + if (tgt.GetModel().GetIsSpherical()) { + TargetModel model = tgt.GetModel(); + fieldCorners = model.GetFieldVertices(TargetModel::GetOrientedPose( + tgt.GetPose().Translation(), cameraPose.Translation())); + } + + std::vector imagePoints = OpenCVHelp::ProjectPoints( + prop.GetIntrinsics(), prop.GetDistCoeffs(), camRt, fieldCorners); + if (tgt.GetModel().GetIsSpherical()) { + cv::Point2d center = OpenCVHelp::AvgPoint(imagePoints); + int l = 0; + int t = 0; + int b = 0; + int r = 0; + for (int i = 0; i < 4; i++) { + if (imagePoints[i].x < imagePoints[l].x) { + l = i; + } + } + cv::Point2d lc = imagePoints[l]; + std::array angles{}; + t = (l + 1) % 4; + b = (l + 1) % 4; + for (int i = 0; i < 4; i++) { + if (i == l) { + continue; + } + cv::Point2d ic = imagePoints[i]; + angles[i] = std::atan2(lc.y - ic.y, ic.x - lc.x); + if (angles[i] >= angles[t]) { + t = i; + } + if (angles[i] <= angles[b]) { + b = i; + } + } + for (int i = 0; i < 4; i++) { + if (i != t && i != l && i != b) { + r = i; + } + } + cv::RotatedRect rect{ + cv::Point2d{center.x, center.y}, + cv::Size2d{imagePoints[r].x - lc.x, + imagePoints[b].y - imagePoints[t].y}, + units::radian_t{-angles[r]}.convert().to()}; + std::vector points{}; + rect.points(points); + + // Can't find an easier way to convert from Point2f to Point2d + imagePoints.clear(); + std::transform(points.begin(), points.end(), + std::back_inserter(imagePoints), + [](const cv::Point2f& p) { return (cv::Point2d)p; }); + } + + visibleTgts.emplace_back(std::make_pair(tgt, imagePoints)); + std::vector noisyTargetCorners = + prop.EstPixelNoise(imagePoints); + cv::RotatedRect minAreaRect = + OpenCVHelp::GetMinAreaRect(noisyTargetCorners); + std::vector minAreaRectPts; + minAreaRectPts.reserve(4); + minAreaRect.points(minAreaRectPts); + cv::Point2d centerPt = minAreaRect.center; + frc::Rotation3d centerRot = prop.GetPixelRot(centerPt); + double areaPercent = prop.GetContourAreaPercent(noisyTargetCorners); + + if (!(CanSeeCorner(noisyTargetCorners) && + areaPercent >= minTargetAreaPercent)) { + continue; + } + + PNPResult pnpSim{}; + if (tgt.fiducialId >= 0 && tgt.GetFieldVertices().size() == 4) { + pnpSim = OpenCVHelp::SolvePNP_Square( + prop.GetIntrinsics(), prop.GetDistCoeffs(), + tgt.GetModel().GetVertices(), noisyTargetCorners); + } + + std::vector> tempCorners = + OpenCVHelp::PointsToCorners(minAreaRectPts); + wpi::SmallVector, 4> smallVec; + + for (const auto& corner : tempCorners) { + smallVec.emplace_back(std::make_pair(static_cast(corner.first), + static_cast(corner.second))); + } + + std::vector> cornersFloat = + OpenCVHelp::PointsToCorners(noisyTargetCorners); + + std::vector> cornersDouble{cornersFloat.begin(), + cornersFloat.end()}; + detectableTgts.emplace_back(PhotonTrackedTarget{ + -centerRot.Z().convert().to(), + -centerRot.Y().convert().to(), areaPercent, + centerRot.X().convert().to(), tgt.fiducialId, + pnpSim.best, pnpSim.alt, pnpSim.ambiguity, smallVec, cornersDouble}); + } + + if (videoSimRawEnabled) { + if (videoSimWireframeEnabled) { + VideoSimUtil::DrawFieldWireFrame(camRt, prop, videoSimWireframeResolution, + 1.5, cv::Scalar{80}, 6, 1, + cv::Scalar{30}, videoSimFrameRaw); + } + + for (const auto& pair : visibleTgts) { + VisionTargetSim 
tgt = pair.first; + std::vector corners = pair.second; + + if (tgt.fiducialId > 0) { + VideoSimUtil::Warp165h5TagImage(tgt.fiducialId, corners, true, + videoSimFrameRaw); + } else if (!tgt.GetModel().GetIsSpherical()) { + std::vector contour = corners; + if (!tgt.GetModel().GetIsPlanar()) { + contour = OpenCVHelp::GetConvexHull(contour); + } + VideoSimUtil::DrawPoly(contour, -1, cv::Scalar{255}, true, + videoSimFrameRaw); + } else { + VideoSimUtil::DrawInscribedEllipse(corners, cv::Scalar{255}, + videoSimFrameRaw); + } + } + videoSimRaw.PutFrame(videoSimFrameRaw); + } else { + videoSimRaw.SetConnectionStrategy( + cs::VideoSource::ConnectionStrategy::kConnectionForceClose); + } + + if (videoSimProcEnabled) { + cv::cvtColor(videoSimFrameRaw, videoSimFrameProcessed, cv::COLOR_GRAY2BGR); + cv::drawMarker( + videoSimFrameProcessed, + cv::Point2d{prop.GetResWidth() / 2.0, prop.GetResHeight() / 2.0}, + cv::Scalar{0, 255, 0}, cv::MARKER_CROSS, + static_cast( + VideoSimUtil::GetScaledThickness(15, videoSimFrameProcessed)), + static_cast( + VideoSimUtil::GetScaledThickness(1, videoSimFrameProcessed)), + cv::LINE_AA); + for (const auto& tgt : detectableTgts) { + auto detectedCornersDouble = tgt.GetDetectedCorners(); + std::vector> detectedCornerFloat{ + detectedCornersDouble.begin(), detectedCornersDouble.end()}; + if (tgt.GetFiducialId() >= 0) { + VideoSimUtil::DrawTagDetection( + tgt.GetFiducialId(), + OpenCVHelp::CornersToPoints(detectedCornerFloat), + videoSimFrameProcessed); + } else { + cv::rectangle(videoSimFrameProcessed, + OpenCVHelp::GetBoundingRect( + OpenCVHelp::CornersToPoints(detectedCornerFloat)), + cv::Scalar{0, 0, 255}, + static_cast(VideoSimUtil::GetScaledThickness( + 1, videoSimFrameProcessed)), + cv::LINE_AA); + + wpi::SmallVector, 4> smallVec = + tgt.GetMinAreaRectCorners(); + + std::vector> cornersCopy{}; + cornersCopy.reserve(4); + + for (const auto& corner : smallVec) { + cornersCopy.emplace_back( + std::make_pair(static_cast(corner.first), + static_cast(corner.second))); + } + + VideoSimUtil::DrawPoly( + OpenCVHelp::CornersToPoints(cornersCopy), + static_cast( + VideoSimUtil::GetScaledThickness(1, videoSimFrameProcessed)), + cv::Scalar{255, 30, 30}, true, videoSimFrameProcessed); + } + } + videoSimProcessed.PutFrame(videoSimFrameProcessed); + } else { + videoSimProcessed.SetConnectionStrategy( + cs::VideoSource::ConnectionStrategy::kConnectionForceClose); + } + + MultiTargetPNPResult multiTagResults{}; + + std::vector visibleLayoutTags = + VisionEstimation::GetVisibleLayoutTags(detectableTgts, tagLayout); + if (visibleLayoutTags.size() > 1) { + wpi::SmallVector usedIds{}; + std::transform(visibleLayoutTags.begin(), visibleLayoutTags.end(), + usedIds.begin(), + [](const frc::AprilTag& tag) { return tag.ID; }); + std::sort(usedIds.begin(), usedIds.end()); + PNPResult pnpResult = VisionEstimation::EstimateCamPosePNP( + prop.GetIntrinsics(), prop.GetDistCoeffs(), detectableTgts, tagLayout, + kAprilTag36h11); + multiTagResults = MultiTargetPNPResult{pnpResult, usedIds}; + } + + return PhotonPipelineResult{latency, detectableTgts, multiTagResults}; +} +void PhotonCameraSim::SubmitProcessedFrame(const PhotonPipelineResult& result) { + SubmitProcessedFrame(result, wpi::Now()); +} +void PhotonCameraSim::SubmitProcessedFrame(const PhotonPipelineResult& result, + uint64_t recieveTimestamp) { + ts.latencyMillisEntry.Set( + result.GetLatency().convert().to(), + recieveTimestamp); + + Packet newPacket{}; + newPacket << result; + + ts.rawBytesEntry.Set(newPacket.GetData(), 
recieveTimestamp); + + bool hasTargets = result.HasTargets(); + ts.hasTargetEntry.Set(hasTargets, recieveTimestamp); + if (!hasTargets) { + ts.targetPitchEntry.Set(0.0, recieveTimestamp); + ts.targetYawEntry.Set(0.0, recieveTimestamp); + ts.targetAreaEntry.Set(0.0, recieveTimestamp); + std::array poseData{0.0, 0.0, 0.0}; + ts.targetPoseEntry.Set(poseData, recieveTimestamp); + ts.targetSkewEntry.Set(0.0, recieveTimestamp); + } else { + PhotonTrackedTarget bestTarget = result.GetBestTarget(); + + ts.targetPitchEntry.Set(bestTarget.GetPitch(), recieveTimestamp); + ts.targetYawEntry.Set(bestTarget.GetYaw(), recieveTimestamp); + ts.targetAreaEntry.Set(bestTarget.GetArea(), recieveTimestamp); + ts.targetSkewEntry.Set(bestTarget.GetSkew(), recieveTimestamp); + + frc::Transform3d transform = bestTarget.GetBestCameraToTarget(); + std::array poseData{ + transform.X().to(), transform.Y().to(), + transform.Rotation().ToRotation2d().Degrees().to()}; + ts.targetPoseEntry.Set(poseData, recieveTimestamp); + } + + auto intrinsics = prop.GetIntrinsics(); + std::vector intrinsicsView{intrinsics.data(), + intrinsics.data() + intrinsics.size()}; + ts.cameraIntrinsicsPublisher.Set(intrinsicsView, recieveTimestamp); + + auto distortion = prop.GetDistCoeffs(); + std::vector distortionView{distortion.data(), + distortion.data() + distortion.size()}; + ts.cameraDistortionPublisher.Set(distortionView, recieveTimestamp); + + ts.heartbeatPublisher.Set(heartbeatCounter++, recieveTimestamp); + + ts.subTable->GetInstance().Flush(); +} + +} // namespace photon diff --git a/photon-lib/src/main/native/cpp/photon/simulation/SimCameraProperties.cpp b/photon-lib/src/main/native/cpp/photon/simulation/SimCameraProperties.cpp new file mode 100644 index 0000000000..bdb4bb2f47 --- /dev/null +++ b/photon-lib/src/main/native/cpp/photon/simulation/SimCameraProperties.cpp @@ -0,0 +1,219 @@ +/* + * MIT License + * + * Copyright (c) PhotonVision + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "photon/simulation/SimCameraProperties.h" + +using namespace photon; + +void SimCameraProperties::SetCalibration(int width, int height, + frc::Rotation2d fovDiag) { + if (fovDiag.Degrees() < 1_deg || fovDiag.Degrees() > 179_deg) { + fovDiag = frc::Rotation2d{ + std::clamp(fovDiag.Degrees(), 1_deg, 179_deg)}; + FRC_ReportError( + frc::err::Error, + "Requested invalid FOV! 
Clamping between (1, 179) degrees..."); + } + double resDiag = std::sqrt(width * width + height * height); + double diagRatio = units::math::tan(fovDiag.Radians() / 2.0); + frc::Rotation2d fovWidth{ + units::radian_t{std::atan(diagRatio * (width / resDiag)) * 2}}; + frc::Rotation2d fovHeight{ + units::radian_t{std::atan(diagRatio * (height / resDiag)) * 2}}; + + Eigen::Matrix newDistCoeffs; + newDistCoeffs << 0, 0, 0, 0, 0; + + double cx = width / 2.0 - 0.5; + double cy = height / 2.0 - 0.5; + + double fx = cx / units::math::tan(fovWidth.Radians() / 2.0); + double fy = cy / units::math::tan(fovHeight.Radians() / 2.0); + + Eigen::Matrix newCamIntrinsics; + newCamIntrinsics << fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0; + SetCalibration(width, height, newCamIntrinsics, newDistCoeffs); +} + +void SimCameraProperties::SetCalibration( + int width, int height, const Eigen::Matrix& newCamIntrinsics, + const Eigen::Matrix& newDistCoeffs) { + resWidth = width; + resHeight = height; + camIntrinsics = newCamIntrinsics; + distCoeffs = newDistCoeffs; + + std::array p{ + frc::Translation3d{1_m, frc::Rotation3d{0_rad, 0_rad, + (GetPixelYaw(0) + + frc::Rotation2d{units::radian_t{ + -std::numbers::pi / 2.0}}) + .Radians()}}, + frc::Translation3d{1_m, frc::Rotation3d{0_rad, 0_rad, + (GetPixelYaw(width) + + frc::Rotation2d{units::radian_t{ + std::numbers::pi / 2.0}}) + .Radians()}}, + frc::Translation3d{1_m, frc::Rotation3d{0_rad, + (GetPixelPitch(0) + + frc::Rotation2d{units::radian_t{ + std::numbers::pi / 2.0}}) + .Radians(), + 0_rad}}, + frc::Translation3d{1_m, frc::Rotation3d{0_rad, + (GetPixelPitch(height) + + frc::Rotation2d{units::radian_t{ + -std::numbers::pi / 2.0}}) + .Radians(), + 0_rad}}, + }; + viewplanes.clear(); + for (size_t i = 0; i < p.size(); i++) { + viewplanes.emplace_back(Eigen::Matrix{ + p[i].X().to(), p[i].Y().to(), p[i].Z().to()}); + } +} + +std::pair, std::optional> +SimCameraProperties::GetVisibleLine(const RotTrlTransform3d& camRt, + const frc::Translation3d& a, + const frc::Translation3d& b) const { + frc::Translation3d relA = camRt.Apply(a); + frc::Translation3d relB = camRt.Apply(b); + + if (relA.X() <= 0_m && relB.X() <= 0_m) { + return {std::nullopt, std::nullopt}; + } + + Eigen::Matrix av{relA.X().to(), relA.Y().to(), + relA.Z().to()}; + Eigen::Matrix bv{relB.X().to(), relB.Y().to(), + relB.Z().to()}; + Eigen::Matrix abv = bv - av; + + bool aVisible = true; + bool bVisible = true; + for (size_t i = 0; i < viewplanes.size(); i++) { + Eigen::Matrix normal = viewplanes[i]; + double aVisibility = av.dot(normal); + if (aVisibility < 0) { + aVisible = false; + } + double bVisibility = bv.dot(normal); + if (bVisibility < 0) { + bVisible = false; + } + if (aVisibility <= 0 && bVisibility <= 0) { + return {std::nullopt, std::nullopt}; + } + } + + if (aVisible && bVisible) { + return {0, 1}; + } + + std::array intersections{std::nan(""), std::nan(""), std::nan(""), + std::nan("")}; + std::vector>> ipts{ + {std::nullopt, std::nullopt, std::nullopt, std::nullopt}}; + + for (size_t i = 0; i < viewplanes.size(); i++) { + Eigen::Matrix normal = viewplanes[i]; + Eigen::Matrix a_projn{}; + a_projn = (av.dot(normal) / normal.dot(normal)) * normal; + + if (std::abs(abv.dot(normal)) < 1e-5) { + continue; + } + intersections[i] = a_projn.dot(a_projn) / -(abv.dot(a_projn)); + + Eigen::Matrix apv{}; + apv = intersections[i] * abv; + Eigen::Matrix intersectpt{}; + intersectpt = av + apv; + ipts[i] = intersectpt; + + for (size_t j = 1; j < viewplanes.size(); j++) { + int oi = (i + j) % 
viewplanes.size(); + Eigen::Matrix onormal = viewplanes[oi]; + if (intersectpt.dot(onormal) < 0) { + intersections[i] = std::nan(""); + ipts[i] = std::nullopt; + break; + } + } + + if (!ipts[i]) { + continue; + } + + for (int j = i - 1; j >= 0; j--) { + std::optional> oipt = ipts[j]; + if (!oipt) { + continue; + } + Eigen::Matrix diff{}; + diff = oipt.value() - intersectpt; + if (diff.cwiseAbs().maxCoeff() < 1e-4) { + intersections[i] = std::nan(""); + ipts[i] = std::nullopt; + break; + } + } + } + + double inter1 = std::nan(""); + double inter2 = std::nan(""); + for (double inter : intersections) { + if (!std::isnan(inter)) { + if (std::isnan(inter1)) { + inter1 = inter; + } else { + inter2 = inter; + } + } + } + + if (!std::isnan(inter2)) { + double max = std::max(inter1, inter2); + double min = std::min(inter1, inter2); + if (aVisible) { + min = 0; + } + if (bVisible) { + max = 1; + } + return {min, max}; + } else if (!std::isnan(inter1)) { + if (aVisible) { + return {0, inter1}; + } + if (bVisible) { + return {inter1, 1}; + } + return {inter1, std::nullopt}; + } else { + return {std::nullopt, std::nullopt}; + } +} diff --git a/photon-lib/src/main/native/include/photon/PhotonCamera.h b/photon-lib/src/main/native/include/photon/PhotonCamera.h index d9cf2574e6..8da9424154 100644 --- a/photon-lib/src/main/native/include/photon/PhotonCamera.h +++ b/photon-lib/src/main/native/include/photon/PhotonCamera.h @@ -26,6 +26,7 @@ #include #include +#include #include #include @@ -74,13 +75,20 @@ class PhotonCamera { PhotonCamera(PhotonCamera&&) = default; - virtual ~PhotonCamera() = default; + ~PhotonCamera() = default; /** - * Returns the latest pipeline result. - * @return The latest pipeline result. + * The list of pipeline results sent by PhotonVision since the last call to + * GetAllUnreadResults(). Calling this function clears the internal FIFO + * queue, and multiple calls to GetAllUnreadResults() will return different + * (potentially empty) result arrays. Be careful to call this exactly ONCE per + * loop of your robot code! FIFO depth is limited to 20 changes, so make sure + * to call this frequently enough to avoid old results being discarded, too! */ - virtual PhotonPipelineResult GetLatestResult(); + std::vector GetAllUnreadResults(); + + [[deprecated("Replace with GetAllUnreadResults")]] + PhotonPipelineResult GetLatestResult(); /** * Toggles driver mode. 
@@ -159,7 +167,7 @@ class PhotonCamera { // For use in tests bool test = false; - PhotonPipelineResult testResult; + std::vector testResult; protected: std::shared_ptr mainTable; @@ -187,8 +195,6 @@ class PhotonCamera { std::string path; std::string m_cameraName; - mutable Packet packet; - private: units::second_t lastVersionCheckTime = 0_s; inline static bool VERSION_CHECK_ENABLED = true; diff --git a/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h b/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h index 2c863330bb..d251480d3f 100644 --- a/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h +++ b/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h @@ -29,6 +29,7 @@ #include #include #include +#include #include "photon/PhotonCamera.h" #include "photon/dataflow/structures/Packet.h" @@ -37,10 +38,6 @@ #include "photon/targeting/PhotonPipelineResult.h" #include "photon/targeting/PhotonTrackedTarget.h" -namespace cv { -class Mat; -} // namespace cv - namespace photon { enum PoseStrategy { LOWEST_AMBIGUITY = 0, @@ -85,28 +82,6 @@ class PhotonPoseEstimator { /** * Create a new PhotonPoseEstimator. * - *

-   * <p>Example: {@code Map<Integer, Pose3d> map = new HashMap<>();
map.put(1, new Pose3d(1.0, 2.0, 3.0, new Rotation3d())); // Tag ID 1 is - * at (1.0,2.0,3.0) } - * - * @param aprilTags A AprilTagFieldLayout linking AprilTag IDs to Pose3ds with - * respect to the FIRST field. - * @param strategy The strategy it should use to determine the best pose. - * @param camera PhotonCameras and - * @param robotToCamera Transform3d from the center of the robot to the camera - * mount positions (ie, robot ➔ camera). - */ - explicit PhotonPoseEstimator(frc::AprilTagFieldLayout aprilTags, - PoseStrategy strategy, PhotonCamera&& camera, - frc::Transform3d robotToCamera); - - /** - * Create a new PhotonPoseEstimator. - * - *

-   * <p>Example: {@code Map<Integer, Pose3d> map = new HashMap<>();
map.put(1, new Pose3d(1.0, 2.0, 3.0, new Rotation3d())); // Tag ID 1 is - * at (1.0,2.0,3.0) } - * * @param aprilTags A AprilTagFieldLayout linking AprilTag IDs to Pose3ds with * respect to the FIRST field. * @param strategy The strategy it should use to determine the best pose. @@ -198,32 +173,26 @@ class PhotonPoseEstimator { inline void SetLastPose(frc::Pose3d lastPose) { this->lastPose = lastPose; } /** - * Update the pose estimator. Internally grabs a new PhotonPipelineResult from - * the camera and process it. - */ - std::optional Update(); - - /** - * Update the pose estimator. - */ - std::optional Update(const PhotonPipelineResult& result); - - /** - * Update the pose estimator. + * Update the pose estimator. If updating multiple times per loop, you should + * call this exactly once per new result, in order of increasing result + * timestamp. + * + * @param result The vision targeting result to process + * @param cameraIntrinsics The camera calibration pinhole coefficients matrix. + * Only required if doing multitag-on-rio, and may be nullopt otherwise. + * @param distCoeffsData The camera calibration distortion coefficients. Only + * required if doing multitag-on-rio, and may be nullopt otherwise. */ std::optional Update( const PhotonPipelineResult& result, - std::optional cameraMatrixData, - std::optional coeffsData); - - inline std::shared_ptr GetCamera() { return camera; } + std::optional cameraIntrinsics = std::nullopt, + std::optional distCoeffsData = std::nullopt); private: frc::AprilTagFieldLayout aprilTags; PoseStrategy strategy; PoseStrategy multiTagFallbackStrategy = LOWEST_AMBIGUITY; - std::shared_ptr camera; frc::Transform3d m_robotToCamera; frc::Pose3d lastPose; diff --git a/photon-lib/src/main/native/include/photon/simulation/PhotonCameraSim.h b/photon-lib/src/main/native/include/photon/simulation/PhotonCameraSim.h index bddcc0cb87..a45258ca1d 100644 --- a/photon-lib/src/main/native/include/photon/simulation/PhotonCameraSim.h +++ b/photon-lib/src/main/native/include/photon/simulation/PhotonCameraSim.h @@ -48,378 +48,50 @@ namespace photon { class PhotonCameraSim { public: - explicit PhotonCameraSim(PhotonCamera* camera) - : PhotonCameraSim(camera, photon::SimCameraProperties::PERFECT_90DEG()) {} - PhotonCameraSim(PhotonCamera* camera, const SimCameraProperties& props) - : prop(props), cam(camera) { - SetMinTargetAreaPixels(kDefaultMinAreaPx); - videoSimRaw = frc::CameraServer::PutVideo( - std::string{camera->GetCameraName()} + "-raw", prop.GetResWidth(), - prop.GetResHeight()); - videoSimRaw.SetPixelFormat(cs::VideoMode::PixelFormat::kGray); - videoSimProcessed = frc::CameraServer::PutVideo( - std::string{camera->GetCameraName()} + "-processed", prop.GetResWidth(), - prop.GetResHeight()); - ts.subTable = cam->GetCameraTable(); - ts.UpdateEntries(); - } + explicit PhotonCameraSim(PhotonCamera* camera); + PhotonCameraSim(PhotonCamera* camera, const SimCameraProperties& props); PhotonCameraSim(PhotonCamera* camera, const SimCameraProperties& props, - double minTargetAreaPercent, units::meter_t maxSightRange) - : PhotonCameraSim(camera, props) { - this->minTargetAreaPercent = minTargetAreaPercent; - this->maxSightRange = maxSightRange; - } - PhotonCamera* GetCamera() { return cam; } - double GetMinTargetAreaPercent() { return minTargetAreaPercent; } - double GetMinTargetAreaPixels() { + double minTargetAreaPercent, units::meter_t maxSightRange); + + inline PhotonCamera* GetCamera() { return cam; } + inline double GetMinTargetAreaPercent() { return minTargetAreaPercent; } 
+ inline double GetMinTargetAreaPixels() { return minTargetAreaPercent / 100.0 * prop.GetResArea(); } - units::meter_t GetMaxSightRange() { return maxSightRange; } - const cs::CvSource& GetVideoSimRaw() { return videoSimRaw; } - const cv::Mat& GetVideoSimFrameRaw() { return videoSimFrameRaw; } - bool CanSeeTargetPose(const frc::Pose3d& camPose, - const VisionTargetSim& target) { - CameraTargetRelation rel{camPose, target.GetPose()}; - return ((units::math::abs(rel.camToTargYaw.Degrees()) < - prop.GetHorizFOV().Degrees() / 2.0) && - (units::math::abs(rel.camToTargPitch.Degrees()) < - prop.GetVertFOV().Degrees() / 2.0) && - (!target.GetModel().GetIsPlanar() || - units::math::abs(rel.targToCamAngle.Degrees()) < 90_deg) && - (rel.camToTarg.Translation().Norm() <= maxSightRange)); - } - bool CanSeeCorner(const std::vector& points) { - for (const auto& pt : points) { - if (std::clamp(pt.x, 0, prop.GetResWidth()) != pt.x || - std::clamp(pt.y, 0, prop.GetResHeight()) != pt.y) { - return false; - } - } - return true; - } - std::optional ConsumeNextEntryTime() { - uint64_t now = wpi::Now(); - uint64_t timestamp{}; - int iter = 0; - while (now >= nextNTEntryTime) { - timestamp = nextNTEntryTime; - uint64_t frameTime = prop.EstSecUntilNextFrame() - .convert() - .to(); - nextNTEntryTime += frameTime; + inline units::meter_t GetMaxSightRange() { return maxSightRange; } + inline const cs::CvSource& GetVideoSimRaw() { return videoSimRaw; } + inline const cv::Mat& GetVideoSimFrameRaw() { return videoSimFrameRaw; } - if (iter++ > 50) { - timestamp = now; - nextNTEntryTime = now + frameTime; - break; - } - } + bool CanSeeTargetPose(const frc::Pose3d& camPose, + const VisionTargetSim& target); + bool CanSeeCorner(const std::vector& points); + std::optional ConsumeNextEntryTime(); - if (timestamp != 0) { - return timestamp; - } else { - return std::nullopt; - } - } - void SetMinTargetAreaPercent(double areaPercent) { + inline void SetMinTargetAreaPercent(double areaPercent) { minTargetAreaPercent = areaPercent; } - void SetMinTargetAreaPixels(double areaPx) { + inline void SetMinTargetAreaPixels(double areaPx) { minTargetAreaPercent = areaPx / prop.GetResArea() * 100; } - void SetMaxSightRange(units::meter_t range) { maxSightRange = range; } - void EnableRawStream(bool enabled) { videoSimRawEnabled = enabled; } - void EnableDrawWireframe(bool enabled) { videoSimWireframeEnabled = enabled; } - void SetWireframeResolution(double resolution) { + inline void SetMaxSightRange(units::meter_t range) { maxSightRange = range; } + inline void EnableRawStream(bool enabled) { videoSimRawEnabled = enabled; } + inline void EnableDrawWireframe(bool enabled) { + videoSimWireframeEnabled = enabled; + } + inline void SetWireframeResolution(double resolution) { videoSimWireframeResolution = resolution; } - void EnabledProcessedStream(double enabled) { videoSimProcEnabled = enabled; } + inline void EnabledProcessedStream(double enabled) { + videoSimProcEnabled = enabled; + } PhotonPipelineResult Process(units::second_t latency, const frc::Pose3d& cameraPose, - std::vector targets) { - std::sort( - targets.begin(), targets.end(), - [cameraPose](const VisionTargetSim& t1, const VisionTargetSim& t2) { - units::meter_t dist1 = - t1.GetPose().Translation().Distance(cameraPose.Translation()); - units::meter_t dist2 = - t2.GetPose().Translation().Distance(cameraPose.Translation()); - return dist1 > dist2; - }); - - std::vector>> - visibleTgts{}; - std::vector detectableTgts{}; - RotTrlTransform3d camRt = 
RotTrlTransform3d::MakeRelativeTo(cameraPose); - - VideoSimUtil::UpdateVideoProp(videoSimRaw, prop); - VideoSimUtil::UpdateVideoProp(videoSimProcessed, prop); - cv::Size videoFrameSize{prop.GetResWidth(), prop.GetResHeight()}; - cv::Mat blankFrame = cv::Mat::zeros(videoFrameSize, CV_8UC1); - blankFrame.assignTo(videoSimFrameRaw); - - for (const auto& tgt : targets) { - if (!CanSeeTargetPose(cameraPose, tgt)) { - continue; - } - - std::vector fieldCorners = tgt.GetFieldVertices(); - if (tgt.GetModel().GetIsSpherical()) { - TargetModel model = tgt.GetModel(); - fieldCorners = model.GetFieldVertices(TargetModel::GetOrientedPose( - tgt.GetPose().Translation(), cameraPose.Translation())); - } - - std::vector imagePoints = OpenCVHelp::ProjectPoints( - prop.GetIntrinsics(), prop.GetDistCoeffs(), camRt, fieldCorners); - if (tgt.GetModel().GetIsSpherical()) { - cv::Point2d center = OpenCVHelp::AvgPoint(imagePoints); - int l = 0; - int t = 0; - int b = 0; - int r = 0; - for (int i = 0; i < 4; i++) { - if (imagePoints[i].x < imagePoints[l].x) { - l = i; - } - } - cv::Point2d lc = imagePoints[l]; - std::array angles{}; - t = (l + 1) % 4; - b = (l + 1) % 4; - for (int i = 0; i < 4; i++) { - if (i == l) { - continue; - } - cv::Point2d ic = imagePoints[i]; - angles[i] = std::atan2(lc.y - ic.y, ic.x - lc.x); - if (angles[i] >= angles[t]) { - t = i; - } - if (angles[i] <= angles[b]) { - b = i; - } - } - for (int i = 0; i < 4; i++) { - if (i != t && i != l && i != b) { - r = i; - } - } - cv::RotatedRect rect{ - cv::Point2d{center.x, center.y}, - cv::Size2d{imagePoints[r].x - lc.x, - imagePoints[b].y - imagePoints[t].y}, - units::radian_t{-angles[r]}.convert().to()}; - std::vector points{}; - rect.points(points); - - // Can't find an easier way to convert from Point2f to Point2d - imagePoints.clear(); - std::transform(points.begin(), points.end(), - std::back_inserter(imagePoints), - [](const cv::Point2f& p) { return (cv::Point2d)p; }); - } - - visibleTgts.emplace_back(std::make_pair(tgt, imagePoints)); - std::vector noisyTargetCorners = - prop.EstPixelNoise(imagePoints); - cv::RotatedRect minAreaRect = - OpenCVHelp::GetMinAreaRect(noisyTargetCorners); - std::vector minAreaRectPts; - minAreaRectPts.reserve(4); - minAreaRect.points(minAreaRectPts); - cv::Point2d centerPt = minAreaRect.center; - frc::Rotation3d centerRot = prop.GetPixelRot(centerPt); - double areaPercent = prop.GetContourAreaPercent(noisyTargetCorners); - - if (!(CanSeeCorner(noisyTargetCorners) && - areaPercent >= minTargetAreaPercent)) { - continue; - } - - PNPResult pnpSim{}; - if (tgt.fiducialId >= 0 && tgt.GetFieldVertices().size() == 4) { - pnpSim = OpenCVHelp::SolvePNP_Square( - prop.GetIntrinsics(), prop.GetDistCoeffs(), - tgt.GetModel().GetVertices(), noisyTargetCorners); - } - - std::vector> tempCorners = - OpenCVHelp::PointsToCorners(minAreaRectPts); - wpi::SmallVector, 4> smallVec; - - for (const auto& corner : tempCorners) { - smallVec.emplace_back( - std::make_pair(static_cast(corner.first), - static_cast(corner.second))); - } + std::vector targets); - std::vector> cornersFloat = - OpenCVHelp::PointsToCorners(noisyTargetCorners); - - std::vector> cornersDouble{cornersFloat.begin(), - cornersFloat.end()}; - detectableTgts.emplace_back(PhotonTrackedTarget{ - -centerRot.Z().convert().to(), - -centerRot.Y().convert().to(), areaPercent, - centerRot.X().convert().to(), tgt.fiducialId, - pnpSim.best, pnpSim.alt, pnpSim.ambiguity, smallVec, cornersDouble}); - } - - if (videoSimRawEnabled) { - if (videoSimWireframeEnabled) { - 
VideoSimUtil::DrawFieldWireFrame( - camRt, prop, videoSimWireframeResolution, 1.5, cv::Scalar{80}, 6, 1, - cv::Scalar{30}, videoSimFrameRaw); - } - - for (const auto& pair : visibleTgts) { - VisionTargetSim tgt = pair.first; - std::vector corners = pair.second; - - if (tgt.fiducialId > 0) { - VideoSimUtil::Warp165h5TagImage(tgt.fiducialId, corners, true, - videoSimFrameRaw); - } else if (!tgt.GetModel().GetIsSpherical()) { - std::vector contour = corners; - if (!tgt.GetModel().GetIsPlanar()) { - contour = OpenCVHelp::GetConvexHull(contour); - } - VideoSimUtil::DrawPoly(contour, -1, cv::Scalar{255}, true, - videoSimFrameRaw); - } else { - VideoSimUtil::DrawInscribedEllipse(corners, cv::Scalar{255}, - videoSimFrameRaw); - } - } - videoSimRaw.PutFrame(videoSimFrameRaw); - } else { - videoSimRaw.SetConnectionStrategy( - cs::VideoSource::ConnectionStrategy::kConnectionForceClose); - } - - if (videoSimProcEnabled) { - cv::cvtColor(videoSimFrameRaw, videoSimFrameProcessed, - cv::COLOR_GRAY2BGR); - cv::drawMarker( - videoSimFrameProcessed, - cv::Point2d{prop.GetResWidth() / 2.0, prop.GetResHeight() / 2.0}, - cv::Scalar{0, 255, 0}, cv::MARKER_CROSS, - static_cast( - VideoSimUtil::GetScaledThickness(15, videoSimFrameProcessed)), - static_cast( - VideoSimUtil::GetScaledThickness(1, videoSimFrameProcessed)), - cv::LINE_AA); - for (const auto& tgt : detectableTgts) { - auto detectedCornersDouble = tgt.GetDetectedCorners(); - std::vector> detectedCornerFloat{ - detectedCornersDouble.begin(), detectedCornersDouble.end()}; - if (tgt.GetFiducialId() >= 0) { - VideoSimUtil::DrawTagDetection( - tgt.GetFiducialId(), - OpenCVHelp::CornersToPoints(detectedCornerFloat), - videoSimFrameProcessed); - } else { - cv::rectangle(videoSimFrameProcessed, - OpenCVHelp::GetBoundingRect( - OpenCVHelp::CornersToPoints(detectedCornerFloat)), - cv::Scalar{0, 0, 255}, - static_cast(VideoSimUtil::GetScaledThickness( - 1, videoSimFrameProcessed)), - cv::LINE_AA); - - wpi::SmallVector, 4> smallVec = - tgt.GetMinAreaRectCorners(); - - std::vector> cornersCopy{}; - cornersCopy.reserve(4); - - for (const auto& corner : smallVec) { - cornersCopy.emplace_back( - std::make_pair(static_cast(corner.first), - static_cast(corner.second))); - } - - VideoSimUtil::DrawPoly( - OpenCVHelp::CornersToPoints(cornersCopy), - static_cast( - VideoSimUtil::GetScaledThickness(1, videoSimFrameProcessed)), - cv::Scalar{255, 30, 30}, true, videoSimFrameProcessed); - } - } - videoSimProcessed.PutFrame(videoSimFrameProcessed); - } else { - videoSimProcessed.SetConnectionStrategy( - cs::VideoSource::ConnectionStrategy::kConnectionForceClose); - } - - MultiTargetPNPResult multiTagResults{}; - - std::vector visibleLayoutTags = - VisionEstimation::GetVisibleLayoutTags(detectableTgts, tagLayout); - if (visibleLayoutTags.size() > 1) { - wpi::SmallVector usedIds{}; - std::transform(visibleLayoutTags.begin(), visibleLayoutTags.end(), - usedIds.begin(), - [](const frc::AprilTag& tag) { return tag.ID; }); - std::sort(usedIds.begin(), usedIds.end()); - PNPResult pnpResult = VisionEstimation::EstimateCamPosePNP( - prop.GetIntrinsics(), prop.GetDistCoeffs(), detectableTgts, tagLayout, - kAprilTag36h11); - multiTagResults = MultiTargetPNPResult{pnpResult, usedIds}; - } - - return PhotonPipelineResult{latency, detectableTgts, multiTagResults}; - } - void SubmitProcessedFrame(const PhotonPipelineResult& result) { - SubmitProcessedFrame(result, wpi::Now()); - } + void SubmitProcessedFrame(const PhotonPipelineResult& result); void SubmitProcessedFrame(const 
PhotonPipelineResult& result, - uint64_t recieveTimestamp) { - ts.latencyMillisEntry.Set( - result.GetLatency().convert().to(), - recieveTimestamp); - - Packet newPacket{}; - newPacket << result; - ts.rawBytesEntry.Set(newPacket.GetData(), recieveTimestamp); - - bool hasTargets = result.HasTargets(); - ts.hasTargetEntry.Set(hasTargets, recieveTimestamp); - if (!hasTargets) { - ts.targetPitchEntry.Set(0.0, recieveTimestamp); - ts.targetYawEntry.Set(0.0, recieveTimestamp); - ts.targetAreaEntry.Set(0.0, recieveTimestamp); - std::array poseData{0.0, 0.0, 0.0}; - ts.targetPoseEntry.Set(poseData, recieveTimestamp); - ts.targetSkewEntry.Set(0.0, recieveTimestamp); - } else { - PhotonTrackedTarget bestTarget = result.GetBestTarget(); + uint64_t recieveTimestamp); - ts.targetPitchEntry.Set(bestTarget.GetPitch(), recieveTimestamp); - ts.targetYawEntry.Set(bestTarget.GetYaw(), recieveTimestamp); - ts.targetAreaEntry.Set(bestTarget.GetArea(), recieveTimestamp); - ts.targetSkewEntry.Set(bestTarget.GetSkew(), recieveTimestamp); - - frc::Transform3d transform = bestTarget.GetBestCameraToTarget(); - std::array poseData{ - transform.X().to(), transform.Y().to(), - transform.Rotation().ToRotation2d().Degrees().to()}; - ts.targetPoseEntry.Set(poseData, recieveTimestamp); - } - - auto intrinsics = prop.GetIntrinsics(); - std::vector intrinsicsView{intrinsics.data(), - intrinsics.data() + intrinsics.size()}; - ts.cameraIntrinsicsPublisher.Set(intrinsicsView, recieveTimestamp); - - auto distortion = prop.GetDistCoeffs(); - std::vector distortionView{distortion.data(), - distortion.data() + distortion.size()}; - ts.cameraDistortionPublisher.Set(distortionView, recieveTimestamp); - - ts.heartbeatPublisher.Set(heartbeatCounter++, recieveTimestamp); - } SimCameraProperties prop; private: diff --git a/photon-lib/src/main/native/include/photon/simulation/SimCameraProperties.h b/photon-lib/src/main/native/include/photon/simulation/SimCameraProperties.h index 4632ef5d49..bc188f79d6 100644 --- a/photon-lib/src/main/native/include/photon/simulation/SimCameraProperties.h +++ b/photon-lib/src/main/native/include/photon/simulation/SimCameraProperties.h @@ -45,77 +45,11 @@ class SimCameraProperties { public: SimCameraProperties() { SetCalibration(960, 720, frc::Rotation2d{90_deg}); } SimCameraProperties(std::string path, int width, int height) {} - void SetCalibration(int width, int height, frc::Rotation2d fovDiag) { - if (fovDiag.Degrees() < 1_deg || fovDiag.Degrees() > 179_deg) { - fovDiag = frc::Rotation2d{ - std::clamp(fovDiag.Degrees(), 1_deg, 179_deg)}; - FRC_ReportError( - frc::err::Error, - "Requested invalid FOV! 
Clamping between (1, 179) degrees..."); - } - double resDiag = std::sqrt(width * width + height * height); - double diagRatio = units::math::tan(fovDiag.Radians() / 2.0); - frc::Rotation2d fovWidth{ - units::radian_t{std::atan(diagRatio * (width / resDiag)) * 2}}; - frc::Rotation2d fovHeight{ - units::radian_t{std::atan(diagRatio * (height / resDiag)) * 2}}; - - Eigen::Matrix newDistCoeffs; - newDistCoeffs << 0, 0, 0, 0, 0; - - double cx = width / 2.0 - 0.5; - double cy = height / 2.0 - 0.5; - - double fx = cx / units::math::tan(fovWidth.Radians() / 2.0); - double fy = cy / units::math::tan(fovHeight.Radians() / 2.0); - - Eigen::Matrix newCamIntrinsics; - newCamIntrinsics << fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0; - SetCalibration(width, height, newCamIntrinsics, newDistCoeffs); - } + void SetCalibration(int width, int height, frc::Rotation2d fovDiag); void SetCalibration(int width, int height, const Eigen::Matrix& newCamIntrinsics, - const Eigen::Matrix& newDistCoeffs) { - resWidth = width; - resHeight = height; - camIntrinsics = newCamIntrinsics; - distCoeffs = newDistCoeffs; - - std::array p{ - frc::Translation3d{ - 1_m, - frc::Rotation3d{0_rad, 0_rad, - (GetPixelYaw(0) + frc::Rotation2d{units::radian_t{ - -std::numbers::pi / 2.0}}) - .Radians()}}, - frc::Translation3d{ - 1_m, frc::Rotation3d{0_rad, 0_rad, - (GetPixelYaw(width) + - frc::Rotation2d{ - units::radian_t{std::numbers::pi / 2.0}}) - .Radians()}}, - frc::Translation3d{ - 1_m, - frc::Rotation3d{0_rad, - (GetPixelPitch(0) + frc::Rotation2d{units::radian_t{ - std::numbers::pi / 2.0}}) - .Radians(), - 0_rad}}, - frc::Translation3d{ - 1_m, frc::Rotation3d{0_rad, - (GetPixelPitch(height) + - frc::Rotation2d{ - units::radian_t{-std::numbers::pi / 2.0}}) - .Radians(), - 0_rad}}, - }; - viewplanes.clear(); - for (size_t i = 0; i < p.size(); i++) { - viewplanes.emplace_back(Eigen::Matrix{ - p[i].X().to(), p[i].Y().to(), p[i].Z().to()}); - } - } + const Eigen::Matrix& newDistCoeffs); void SetCalibError(double newAvgErrorPx, double newErrorStdDevPx) { avgErrorPx = newAvgErrorPx; @@ -220,125 +154,7 @@ class SimCameraProperties { std::pair, std::optional> GetVisibleLine( const RotTrlTransform3d& camRt, const frc::Translation3d& a, - const frc::Translation3d& b) const { - frc::Translation3d relA = camRt.Apply(a); - frc::Translation3d relB = camRt.Apply(b); - - if (relA.X() <= 0_m && relB.X() <= 0_m) { - return {std::nullopt, std::nullopt}; - } - - Eigen::Matrix av{relA.X().to(), relA.Y().to(), - relA.Z().to()}; - Eigen::Matrix bv{relB.X().to(), relB.Y().to(), - relB.Z().to()}; - Eigen::Matrix abv = bv - av; - - bool aVisible = true; - bool bVisible = true; - for (size_t i = 0; i < viewplanes.size(); i++) { - Eigen::Matrix normal = viewplanes[i]; - double aVisibility = av.dot(normal); - if (aVisibility < 0) { - aVisible = false; - } - double bVisibility = bv.dot(normal); - if (bVisibility < 0) { - bVisible = false; - } - if (aVisibility <= 0 && bVisibility <= 0) { - return {std::nullopt, std::nullopt}; - } - } - - if (aVisible && bVisible) { - return {0, 1}; - } - - std::array intersections{std::nan(""), std::nan(""), - std::nan(""), std::nan("")}; - std::vector>> ipts{ - {std::nullopt, std::nullopt, std::nullopt, std::nullopt}}; - - for (size_t i = 0; i < viewplanes.size(); i++) { - Eigen::Matrix normal = viewplanes[i]; - Eigen::Matrix a_projn{}; - a_projn = (av.dot(normal) / normal.dot(normal)) * normal; - - if (std::abs(abv.dot(normal)) < 1e-5) { - continue; - } - intersections[i] = a_projn.dot(a_projn) / -(abv.dot(a_projn)); - - 
Eigen::Matrix apv{}; - apv = intersections[i] * abv; - Eigen::Matrix intersectpt{}; - intersectpt = av + apv; - ipts[i] = intersectpt; - - for (size_t j = 1; j < viewplanes.size(); j++) { - int oi = (i + j) % viewplanes.size(); - Eigen::Matrix onormal = viewplanes[oi]; - if (intersectpt.dot(onormal) < 0) { - intersections[i] = std::nan(""); - ipts[i] = std::nullopt; - break; - } - } - - if (!ipts[i]) { - continue; - } - - for (int j = i - 1; j >= 0; j--) { - std::optional> oipt = ipts[j]; - if (!oipt) { - continue; - } - Eigen::Matrix diff{}; - diff = oipt.value() - intersectpt; - if (diff.cwiseAbs().maxCoeff() < 1e-4) { - intersections[i] = std::nan(""); - ipts[i] = std::nullopt; - break; - } - } - } - - double inter1 = std::nan(""); - double inter2 = std::nan(""); - for (double inter : intersections) { - if (!std::isnan(inter)) { - if (std::isnan(inter1)) { - inter1 = inter; - } else { - inter2 = inter; - } - } - } - - if (!std::isnan(inter2)) { - double max = std::max(inter1, inter2); - double min = std::min(inter1, inter2); - if (aVisible) { - min = 0; - } - if (bVisible) { - max = 1; - } - return {min, max}; - } else if (!std::isnan(inter1)) { - if (aVisible) { - return {0, inter1}; - } - if (bVisible) { - return {inter1, 1}; - } - return {inter1, std::nullopt}; - } else { - return {std::nullopt, std::nullopt}; - } - } + const frc::Translation3d& b) const; std::vector EstPixelNoise( const std::vector& points) { diff --git a/photon-lib/src/main/native/include/photon/simulation/SimPhotonCamera.h b/photon-lib/src/main/native/include/photon/simulation/SimPhotonCamera.h deleted file mode 100644 index 365efebd59..0000000000 --- a/photon-lib/src/main/native/include/photon/simulation/SimPhotonCamera.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - * MIT License - * - * Copyright (c) PhotonVision - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#pragma once - -#include -#include -#include -#include -#include - -#include - -#include "photon/PhotonCamera.h" -#include "photon/PhotonTargetSortMode.h" -#include "photon/dataflow/structures/Packet.h" -#include "photon/targeting/MultiTargetPNPResult.h" -#include "photon/targeting/PNPResult.h" -#include "photon/targeting/PhotonPipelineResult.h" -#include "photon/targeting/PhotonTrackedTarget.h" - -namespace photon { -class SimPhotonCamera : public PhotonCamera { - public: - SimPhotonCamera(nt::NetworkTableInstance instance, - const std::string& cameraName) - : PhotonCamera(instance, cameraName) { - latencyMillisEntry = rootTable->GetEntry("latencyMillis"); - hasTargetEntry = rootTable->GetEntry("hasTargetEntry"); - targetPitchEntry = rootTable->GetEntry("targetPitchEntry"); - targetYawEntry = rootTable->GetEntry("targetYawEntry"); - targetAreaEntry = rootTable->GetEntry("targetAreaEntry"); - targetSkewEntry = rootTable->GetEntry("targetSkewEntry"); - targetPoseEntry = rootTable->GetEntry("targetPoseEntry"); - rawBytesPublisher = rootTable->GetRawTopic("rawBytes").Publish("rawBytes"); - versionEntry = instance.GetTable("photonvision")->GetEntry("version"); - // versionEntry.SetString(PhotonVersion.versionString); - } - - explicit SimPhotonCamera(const std::string& cameraName) - : SimPhotonCamera(nt::NetworkTableInstance::GetDefault(), cameraName) {} - - virtual ~SimPhotonCamera() = default; - - /** - * Simulate one processed frame of vision data, putting one result to NT. - * - * @param latency Latency of the provided frame - * @param targetList List of targets detected - */ - void SubmitProcessedFrame(units::millisecond_t latency, - std::vector targetList) { - SubmitProcessedFrame(latency, PhotonTargetSortMode::LeftMost(), targetList); - } - - /** - * Simulate one processed frame of vision data, putting one result to NT. 
- * - * @param latency Latency of the provided frame - * @param sortMode Order in which to sort targets - * @param targetList List of targets detected - */ - void SubmitProcessedFrame( - units::millisecond_t latency, - std::function - sortMode, - std::vector targetList) { - latencyMillisEntry.SetDouble(latency.to()); - std::sort(targetList.begin(), targetList.end(), - [&](auto lhs, auto rhs) { return sortMode(lhs, rhs); }); - PhotonPipelineResult newResult{latency, targetList}; - Packet packet{}; - packet << newResult; - - rawBytesPublisher.Set( - std::span{packet.GetData().data(), packet.GetDataSize()}); - - bool hasTargets = newResult.HasTargets(); - hasTargetEntry.SetBoolean(hasTargets); - if (!hasTargets) { - targetPitchEntry.SetDouble(0.0); - targetYawEntry.SetDouble(0.0); - targetAreaEntry.SetDouble(0.0); - targetPoseEntry.SetDoubleArray( - std::vector{0.0, 0.0, 0.0, 0, 0, 0, 0}); - targetSkewEntry.SetDouble(0.0); - } else { - PhotonTrackedTarget bestTarget = newResult.GetBestTarget(); - targetPitchEntry.SetDouble(bestTarget.GetPitch()); - targetYawEntry.SetDouble(bestTarget.GetYaw()); - targetAreaEntry.SetDouble(bestTarget.GetArea()); - targetSkewEntry.SetDouble(bestTarget.GetSkew()); - - frc::Transform3d transform = bestTarget.GetBestCameraToTarget(); - targetPoseEntry.SetDoubleArray(std::vector{ - transform.X().to(), transform.Y().to(), - transform.Z().to(), transform.Rotation().GetQuaternion().W(), - transform.Rotation().GetQuaternion().X(), - transform.Rotation().GetQuaternion().Y(), - transform.Rotation().GetQuaternion().Z()}); - } - } - - private: - nt::NetworkTableEntry latencyMillisEntry; - nt::NetworkTableEntry hasTargetEntry; - nt::NetworkTableEntry targetPitchEntry; - nt::NetworkTableEntry targetYawEntry; - nt::NetworkTableEntry targetAreaEntry; - nt::NetworkTableEntry targetSkewEntry; - nt::NetworkTableEntry targetPoseEntry; - nt::NetworkTableEntry versionEntry; - nt::RawPublisher rawBytesPublisher; -}; -} // namespace photon diff --git a/photon-lib/src/main/native/include/photon/simulation/SimVisionSystem.h b/photon-lib/src/main/native/include/photon/simulation/SimVisionSystem.h deleted file mode 100644 index 759efb3adc..0000000000 --- a/photon-lib/src/main/native/include/photon/simulation/SimVisionSystem.h +++ /dev/null @@ -1,226 +0,0 @@ -/* - * MIT License - * - * Copyright (c) PhotonVision - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#pragma once - -#include -#include - -#include -#include -#include -#include - -#include "SimPhotonCamera.h" -#include "SimVisionTarget.h" -#include "photon/dataflow/structures/Packet.h" -#include "photon/targeting/MultiTargetPNPResult.h" -#include "photon/targeting/PNPResult.h" -#include "photon/targeting/PhotonPipelineResult.h" -#include "photon/targeting/PhotonTrackedTarget.h" - -namespace photon { -class SimVisionSystem { - public: - SimPhotonCamera cam; - units::radian_t camHorizFOV{0}; - units::radian_t camVertFOV{0}; - units::meter_t maxLEDRange{0}; - int cameraResWidth{0}; - int cameraResHeight{0}; - double minTargetArea{0.0}; - frc::Transform3d cameraToRobot; - - frc::Field2d dbgField; - frc::FieldObject2d* dbgRobot; - frc::FieldObject2d* dbgCamera; - - std::vector targetList; - - /** - * Create a simulated vision system involving a camera and coprocessor mounted - * on a mobile robot running PhotonVision, detecting one or more targets - * scattered around the field. This assumes a fairly simple and - * distortion-less pinhole camera model. - * - * @param camName Name of the PhotonVision camera to create. Align it with the - * settings you use in the PhotonVision GUI. - * @param camDiagFOV Diagonal Field of View of the camera used. Align it with - * the manufacturer specifications, and/or whatever is configured in the - * PhotonVision Setting page. - * @param cameraToRobot Transform to move from the camera's mount position to - * the robot's position - * @param maxLEDRange Maximum distance at which your camera can illuminate the - * target and make it visible. Set to 9000 or more if your vision system does - * not rely on LED's. - * @param cameraResWidth Width of your camera's image sensor in pixels - * @param cameraResHeight Height of your camera's image sensor in pixels - * @param minTargetArea Minimum area that that the target should be before - * it's recognized as a target by the camera. Match this with your contour - * filtering settings in the PhotonVision GUI. - */ - SimVisionSystem(std::string camName, units::degree_t camDiagFOV, - frc::Transform3d cameraToRobot, units::meter_t maxLEDRange, - int cameraResWidth, int cameraResHeight, double minTargetArea) - : cam(camName), - camHorizFOV((camDiagFOV * cameraResWidth) / - std::hypot(cameraResWidth, cameraResHeight)), - camVertFOV((camDiagFOV * cameraResHeight) / - std::hypot(cameraResWidth, cameraResHeight)), - maxLEDRange(maxLEDRange), - cameraResWidth(cameraResWidth), - cameraResHeight(cameraResHeight), - minTargetArea(minTargetArea), - cameraToRobot(cameraToRobot), - dbgField(), - dbgRobot(dbgField.GetRobotObject()), - dbgCamera(dbgField.GetObject(camName + " Camera")) { - frc::SmartDashboard::PutData(camName + " Sim Field", &dbgField); - } - - /** - * Add a target on the field which your vision system is designed to detect. - * The PhotonCamera from this system will report the location of the robot - * relative to the subset of these targets which are visible from the given - * robot position. - * - * @param target Target to add to the simulated field - */ - void AddSimVisionTarget(SimVisionTarget target) { - targetList.push_back(target); - dbgField.GetObject("Target " + std::to_string(target.targetId)) - ->SetPose(target.targetPose.ToPose2d()); - } - - /** - * Clears all sim vision targets. - * This is useful for switching alliances and needing to repopulate the sim - * targets. 
NOTE: Old targets will still show on the Field2d unless - * overwritten by new targets with the same ID - */ - void ClearVisionTargets() { targetList.clear(); } - - /** - * Adjust the camera position relative to the robot. Use this if your camera - * is on a gimbal or turret or some other mobile platform. - * - * @param newCameraToRobot New Transform from the robot to the camera - */ - void MoveCamera(frc::Transform3d newCameraToRobot) { - cameraToRobot = newCameraToRobot; - } - - /** - * Periodic update. Call this once per frame of image data you wish to process - * and send to NetworkTables - * - * @param robotPose current pose of the robot on the field. Will be used to - * calculate which targets are actually in view, where they are at relative to - * the robot, and relevant PhotonVision parameters. - */ - void ProcessFrame(frc::Pose2d robotPose) { - ProcessFrame(frc::Pose3d{ - robotPose.X(), robotPose.Y(), 0.0_m, - frc::Rotation3d{0_rad, 0_rad, robotPose.Rotation().Radians()}}); - } - - /** - * Periodic update. Call this once per frame of image data you wish to process - * and send to NetworkTables - * - * @param robotPose current pose of the robot in space. Will be used to - * calculate which targets are actually in view, where they are at relative to - * the robot, and relevant PhotonVision parameters. - */ - void ProcessFrame(frc::Pose3d robotPose) { - frc::Pose3d cameraPose = robotPose.TransformBy(cameraToRobot.Inverse()); - dbgRobot->SetPose(robotPose.ToPose2d()); - dbgCamera->SetPose(cameraPose.ToPose2d()); - - std::vector visibleTargetList{}; - - for (const auto& target : targetList) { - frc::Transform3d camToTargetTransform{cameraPose, target.targetPose}; - frc::Translation3d camToTargetTranslation{ - camToTargetTransform.Translation()}; - - frc::Translation3d altTranslation{camToTargetTranslation.X(), - -1.0 * camToTargetTranslation.Y(), - camToTargetTranslation.Z()}; - frc::Rotation3d altRotation{camToTargetTransform.Rotation() * -1.0}; - frc::Transform3d camToTargetAltTransform{altTranslation, altRotation}; - units::meter_t dist{camToTargetTranslation.Norm()}; - double areaPixels{target.targetArea / GetM2PerPx(dist)}; - units::radian_t yaw{units::math::atan2(camToTargetTranslation.Y(), - camToTargetTranslation.X())}; - units::meter_t cameraHeightOffGround{cameraPose.Z()}; - units::meter_t targetHeightAboveGround(target.targetPose.Z()); - units::radian_t camPitch{cameraPose.Rotation().Y()}; - frc::Transform2d transformAlongGround{cameraPose.ToPose2d(), - target.targetPose.ToPose2d()}; - units::meter_t distanceAlongGround{ - transformAlongGround.Translation().Norm()}; - units::radian_t pitch = - units::math::atan2(targetHeightAboveGround - cameraHeightOffGround, - distanceAlongGround) - - camPitch; - - if (CamCamSeeTarget(dist, yaw, pitch, areaPixels)) { - visibleTargetList.push_back( - PhotonTrackedTarget{yaw.convert().to(), - pitch.convert().to(), - areaPixels, - 0.0, - target.targetId, - camToTargetTransform, - // TODO sim alternate pose - camToTargetTransform, - // TODO ambiguity - 0.0, - {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, - {{0, 0}, {0, 0}, {0, 0}, {0, 0}}}); - } - - cam.SubmitProcessedFrame(0_s, visibleTargetList); - } - } - - units::square_meter_t GetM2PerPx(units::meter_t dist) { - units::meter_t widthMPerPx = - 2 * dist * units::math::tan(camHorizFOV / 2) / cameraResWidth; - units::meter_t heightMPerPx = - 2 * dist * units::math::tan(camVertFOV / 2) / cameraResHeight; - return widthMPerPx * heightMPerPx; - } - - bool CamCamSeeTarget(units::meter_t dist, units::radian_t 
yaw, - units::radian_t pitch, double area) { - bool inRange = dist < maxLEDRange; - bool inHorizAngle = units::math::abs(yaw) < camHorizFOV / 2; - bool inVertAngle = units::math::abs(pitch) < camVertFOV / 2; - bool targetBigEnough = area > minTargetArea; - return (inRange && inHorizAngle && inVertAngle && targetBigEnough); - } -}; -} // namespace photon diff --git a/photon-lib/src/main/native/include/photon/simulation/SimVisionTarget.h b/photon-lib/src/main/native/include/photon/simulation/SimVisionTarget.h deleted file mode 100644 index 33a9adfee4..0000000000 --- a/photon-lib/src/main/native/include/photon/simulation/SimVisionTarget.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * MIT License - * - * Copyright (c) PhotonVision - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once - -#include -#include - -#include "photon/dataflow/structures/Packet.h" -#include "photon/targeting/MultiTargetPNPResult.h" -#include "photon/targeting/PNPResult.h" -#include "photon/targeting/PhotonPipelineResult.h" -#include "photon/targeting/PhotonTrackedTarget.h" - -namespace photon { -class SimVisionTarget { - public: - SimVisionTarget() = default; - - /** - * Describes a vision target located somewhere on the field that your - * SimVisionSystem can detect. - * - * @param targetPose Pose3d of the target in field-relative coordinates - * @param targetWidth Width of the outer bounding box of the target. - * @param targetHeight Pair Height of the outer bounding box of the - * target. 
- * @param targetId Id of the target - */ - SimVisionTarget(frc::Pose3d targetPose, units::meter_t targetWidth, - units::meter_t targetHeight, int targetId) - : targetPose(targetPose), - targetWidth(targetWidth), - targetHeight(targetHeight), - targetArea(targetHeight * targetWidth), - targetId(targetId) {} - - frc::Pose3d targetPose; - units::meter_t targetWidth; - units::meter_t targetHeight; - units::square_meter_t targetArea; - int targetId; -}; -} // namespace photon diff --git a/photon-lib/src/test/java/org/photonvision/PhotonPoseEstimatorTest.java b/photon-lib/src/test/java/org/photonvision/PhotonPoseEstimatorTest.java index 81842dcc46..4cc0de2af4 100644 --- a/photon-lib/src/test/java/org/photonvision/PhotonPoseEstimatorTest.java +++ b/photon-lib/src/test/java/org/photonvision/PhotonPoseEstimatorTest.java @@ -125,10 +125,9 @@ void testLowestAmbiguityStrategy() { cameraOne.result.setTimestampSeconds(11); PhotonPoseEstimator estimator = - new PhotonPoseEstimator( - aprilTags, PoseStrategy.LOWEST_AMBIGUITY, cameraOne, new Transform3d()); + new PhotonPoseEstimator(aprilTags, PoseStrategy.LOWEST_AMBIGUITY, new Transform3d()); - Optional estimatedPose = estimator.update(); + Optional estimatedPose = estimator.update(cameraOne.result); Pose3d pose = estimatedPose.get().estimatedPose; assertEquals(11, estimatedPose.get().timestampSeconds); @@ -208,10 +207,9 @@ void testClosestToCameraHeightStrategy() { new PhotonPoseEstimator( aprilTags, PoseStrategy.CLOSEST_TO_CAMERA_HEIGHT, - cameraOne, new Transform3d(new Translation3d(0, 0, 4), new Rotation3d())); - Optional estimatedPose = estimator.update(); + Optional estimatedPose = estimator.update(cameraOne.result); Pose3d pose = estimatedPose.get().estimatedPose; assertEquals(4, estimatedPose.get().timestampSeconds); @@ -290,11 +288,10 @@ void closestToReferencePoseStrategy() { new PhotonPoseEstimator( aprilTags, PoseStrategy.CLOSEST_TO_REFERENCE_POSE, - cameraOne, new Transform3d(new Translation3d(0, 0, 0), new Rotation3d())); estimator.setReferencePose(new Pose3d(1, 1, 1, new Rotation3d())); - Optional estimatedPose = estimator.update(); + Optional estimatedPose = estimator.update(cameraOne.result); Pose3d pose = estimatedPose.get().estimatedPose; assertEquals(17, estimatedPose.get().timestampSeconds); @@ -373,12 +370,11 @@ void closestToLastPose() { new PhotonPoseEstimator( aprilTags, PoseStrategy.CLOSEST_TO_LAST_POSE, - cameraOne, new Transform3d(new Translation3d(0, 0, 0), new Rotation3d())); estimator.setLastPose(new Pose3d(1, 1, 1, new Rotation3d())); - Optional estimatedPose = estimator.update(); + Optional estimatedPose = estimator.update(cameraOne.result); Pose3d pose = estimatedPose.get().estimatedPose; cameraOne.result = @@ -444,7 +440,7 @@ void closestToLastPose() { new TargetCorner(7, 8))))); cameraOne.result.setTimestampSeconds(7); - estimatedPose = estimator.update(); + estimatedPose = estimator.update(cameraOne.result); pose = estimatedPose.get().estimatedPose; assertEquals(7, estimatedPose.get().timestampSeconds); @@ -485,25 +481,24 @@ void cacheIsInvalidated() { new PhotonPoseEstimator( aprilTags, PoseStrategy.AVERAGE_BEST_TARGETS, - cameraOne, new Transform3d(new Translation3d(0, 0, 0), new Rotation3d())); // Empty result, expect empty result cameraOne.result = new PhotonPipelineResult(); cameraOne.result.setTimestampSeconds(1); - Optional estimatedPose = estimator.update(); + Optional estimatedPose = estimator.update(cameraOne.result); assertFalse(estimatedPose.isPresent()); // Set actual result cameraOne.result = result; - 
estimatedPose = estimator.update(); + estimatedPose = estimator.update(cameraOne.result); assertTrue(estimatedPose.isPresent()); assertEquals(20, estimatedPose.get().timestampSeconds, .01); assertEquals(20, estimator.poseCacheTimestampSeconds); // And again -- pose cache should mean this is empty cameraOne.result = result; - estimatedPose = estimator.update(); + estimatedPose = estimator.update(cameraOne.result); assertFalse(estimatedPose.isPresent()); // Expect the old timestamp to still be here assertEquals(20, estimator.poseCacheTimestampSeconds); @@ -513,7 +508,7 @@ void cacheIsInvalidated() { assertEquals(-1, estimator.poseCacheTimestampSeconds); // Update should cache the current timestamp (20) again cameraOne.result = result; - estimatedPose = estimator.update(); + estimatedPose = estimator.update(cameraOne.result); assertEquals(20, estimatedPose.get().timestampSeconds, .01); assertEquals(20, estimator.poseCacheTimestampSeconds); } @@ -588,10 +583,9 @@ void averageBestPoses() { new PhotonPoseEstimator( aprilTags, PoseStrategy.AVERAGE_BEST_TARGETS, - cameraOne, new Transform3d(new Translation3d(0, 0, 0), new Rotation3d())); - Optional estimatedPose = estimator.update(); + Optional estimatedPose = estimator.update(cameraOne.result); Pose3d pose = estimatedPose.get().estimatedPose; assertEquals(20, estimatedPose.get().timestampSeconds, .01); @@ -607,6 +601,11 @@ public PhotonCameraInjector() { PhotonPipelineResult result; + @Override + public List getAllUnreadResults() { + return List.of(result); + } + @Override public PhotonPipelineResult getLatestResult() { return result; diff --git a/photon-lib/src/test/java/org/photonvision/estimation/ApriltagWorkbenchTest.java b/photon-lib/src/test/java/org/photonvision/estimation/ApriltagWorkbenchTest.java index 1c0750d82b..24ea9fa6cd 100644 --- a/photon-lib/src/test/java/org/photonvision/estimation/ApriltagWorkbenchTest.java +++ b/photon-lib/src/test/java/org/photonvision/estimation/ApriltagWorkbenchTest.java @@ -61,7 +61,6 @@ public void testMeme() throws IOException, InterruptedException { new PhotonPoseEstimator( tagLayout, PhotonPoseEstimator.PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, - cam, robotToCamera); var field = new Field2d(); @@ -70,10 +69,11 @@ public void testMeme() throws IOException, InterruptedException { while (!Thread.interrupted()) { Thread.sleep(500); - var ret = pe.update(); - System.out.println(ret); - - field.setRobotPose(ret.get().estimatedPose.toPose2d()); + for (var change : cam.getAllUnreadResults()) { + var ret = pe.update(change); + System.out.println(ret); + field.setRobotPose(ret.get().estimatedPose.toPose2d()); + } } } } diff --git a/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp b/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp index aed60ff2fc..a7c65a6cb9 100644 --- a/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp +++ b/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp @@ -83,12 +83,16 @@ TEST(PhotonPoseEstimatorTest, LowestAmbiguityStrategy) { 0.4, corners, detectedCorners}}; cameraOne.test = true; - cameraOne.testResult = {2_ms, targets}; - cameraOne.testResult.SetTimestamp(units::second_t(11)); + cameraOne.testResult = {{2_ms, targets}}; + cameraOne.testResult[0].SetTimestamp(units::second_t(11)); photon::PhotonPoseEstimator estimator(aprilTags, photon::LOWEST_AMBIGUITY, - std::move(cameraOne), {}); - auto estimatedPose = estimator.Update(); + frc::Transform3d{}); + + std::optional estimatedPose; + for (const auto& result : cameraOne.GetAllUnreadResults()) { + 
estimatedPose = estimator.Update(result); + } frc::Pose3d pose = estimatedPose.value().estimatedPose; EXPECT_NEAR(11, units::unit_cast(estimatedPose.value().timestamp), @@ -138,13 +142,17 @@ TEST(PhotonPoseEstimatorTest, ClosestToCameraHeightStrategy) { 0.4, corners, detectedCorners}}; cameraOne.test = true; - cameraOne.testResult = {2_ms, targets}; - cameraOne.testResult.SetTimestamp(17_s); + cameraOne.testResult = {{2_ms, targets}}; + cameraOne.testResult[0].SetTimestamp(17_s); photon::PhotonPoseEstimator estimator( - aprilTags, photon::CLOSEST_TO_CAMERA_HEIGHT, std::move(cameraOne), - {{0_m, 0_m, 4_m}, {}}); - auto estimatedPose = estimator.Update(); + aprilTags, photon::CLOSEST_TO_CAMERA_HEIGHT, {{0_m, 0_m, 4_m}, {}}); + + std::optional estimatedPose; + for (const auto& result : cameraOne.GetAllUnreadResults()) { + estimatedPose = estimator.Update(result); + } + frc::Pose3d pose = estimatedPose.value().estimatedPose; EXPECT_NEAR(17, units::unit_cast(estimatedPose.value().timestamp), @@ -181,14 +189,19 @@ TEST(PhotonPoseEstimatorTest, ClosestToReferencePoseStrategy) { 0.4, corners, detectedCorners}}; cameraOne.test = true; - cameraOne.testResult = {2_ms, targets}; - cameraOne.testResult.SetTimestamp(units::second_t(17)); + cameraOne.testResult = {{2_ms, targets}}; + cameraOne.testResult[0].SetTimestamp(units::second_t(17)); - photon::PhotonPoseEstimator estimator( - aprilTags, photon::CLOSEST_TO_REFERENCE_POSE, std::move(cameraOne), {}); + photon::PhotonPoseEstimator estimator(aprilTags, + photon::CLOSEST_TO_REFERENCE_POSE, {}); estimator.SetReferencePose( frc::Pose3d(1_m, 1_m, 1_m, frc::Rotation3d(0_rad, 0_rad, 0_rad))); - auto estimatedPose = estimator.Update(); + + std::optional estimatedPose; + for (const auto& result : cameraOne.GetAllUnreadResults()) { + estimatedPose = estimator.Update(result); + } + frc::Pose3d pose = estimatedPose.value().estimatedPose; EXPECT_NEAR(17, units::unit_cast(estimatedPose.value().timestamp), @@ -225,14 +238,19 @@ TEST(PhotonPoseEstimatorTest, ClosestToLastPose) { 0.4, corners, detectedCorners}}; cameraOne.test = true; - cameraOne.testResult = {2_ms, targets}; - cameraOne.testResult.SetTimestamp(units::second_t(17)); + cameraOne.testResult = {{2_ms, targets}}; + cameraOne.testResult[0].SetTimestamp(units::second_t(17)); photon::PhotonPoseEstimator estimator(aprilTags, photon::CLOSEST_TO_LAST_POSE, - std::move(cameraOne), {}); + {}); estimator.SetLastPose( frc::Pose3d(1_m, 1_m, 1_m, frc::Rotation3d(0_rad, 0_rad, 0_rad))); - auto estimatedPose = estimator.Update(); + + std::optional estimatedPose; + for (const auto& result : cameraOne.GetAllUnreadResults()) { + estimatedPose = estimator.Update(result); + } + ASSERT_TRUE(estimatedPose); frc::Pose3d pose = estimatedPose.value().estimatedPose; @@ -259,10 +277,14 @@ TEST(PhotonPoseEstimatorTest, ClosestToLastPose) { frc::Rotation3d(0_rad, 0_rad, 0_rad)), 0.4, corners, detectedCorners}}; - estimator.GetCamera()->testResult = {2_ms, targetsThree}; - estimator.GetCamera()->testResult.SetTimestamp(units::second_t(21)); + cameraOne.testResult = {{2_ms, targetsThree}}; + cameraOne.testResult[0].SetTimestamp(units::second_t(21)); + + // std::optional estimatedPose; + for (const auto& result : cameraOne.GetAllUnreadResults()) { + estimatedPose = estimator.Update(result); + } - estimatedPose = estimator.Update(); ASSERT_TRUE(estimatedPose); pose = estimatedPose.value().estimatedPose; @@ -300,12 +322,17 @@ TEST(PhotonPoseEstimatorTest, AverageBestPoses) { 0.4, corners, detectedCorners}}; cameraOne.test = true; - 
cameraOne.testResult = {2_ms, targets}; - cameraOne.testResult.SetTimestamp(units::second_t(15)); + cameraOne.testResult = {{2_ms, targets}}; + cameraOne.testResult[0].SetTimestamp(units::second_t(15)); photon::PhotonPoseEstimator estimator(aprilTags, photon::AVERAGE_BEST_TARGETS, - std::move(cameraOne), {}); - auto estimatedPose = estimator.Update(); + {}); + + std::optional estimatedPose; + for (const auto& result : cameraOne.GetAllUnreadResults()) { + estimatedPose = estimator.Update(result); + } + frc::Pose3d pose = estimatedPose.value().estimatedPose; EXPECT_NEAR(15.0, units::unit_cast(estimatedPose.value().timestamp), @@ -344,22 +371,34 @@ TEST(PhotonPoseEstimatorTest, PoseCache) { cameraOne.test = true; photon::PhotonPoseEstimator estimator(aprilTags, photon::AVERAGE_BEST_TARGETS, - std::move(cameraOne), {}); + {}); // empty input, expect empty out - estimator.GetCamera()->testResult = {2_ms, {}}; - estimator.GetCamera()->testResult.SetTimestamp(units::second_t(1)); - auto estimatedPose = estimator.Update(); + cameraOne.testResult = {{2_ms, {}}}; + cameraOne.testResult[0].SetTimestamp(units::second_t(1)); + + std::optional estimatedPose; + for (const auto& result : cameraOne.GetAllUnreadResults()) { + estimatedPose = estimator.Update(result); + } + EXPECT_FALSE(estimatedPose); // Set result, and update -- expect present and timestamp to be 15 - estimator.GetCamera()->testResult = {3_ms, targets}; - estimator.GetCamera()->testResult.SetTimestamp(units::second_t(15)); - estimatedPose = estimator.Update(); + cameraOne.testResult = {{3_ms, targets}}; + cameraOne.testResult[0].SetTimestamp(units::second_t(15)); + + for (const auto& result : cameraOne.GetAllUnreadResults()) { + estimatedPose = estimator.Update(result); + } + EXPECT_TRUE(estimatedPose); EXPECT_NEAR(15, estimatedPose.value().timestamp.to(), 1e-6); // And again -- now pose cache should be empty - estimatedPose = estimator.Update(); + for (const auto& result : cameraOne.GetAllUnreadResults()) { + estimatedPose = estimator.Update(result); + } + EXPECT_FALSE(estimatedPose); } diff --git a/photon-lib/src/test/native/cpp/SimVisionSystemTest.cpp b/photon-lib/src/test/native/cpp/SimVisionSystemTest.cpp deleted file mode 100644 index 2c985c6e56..0000000000 --- a/photon-lib/src/test/native/cpp/SimVisionSystemTest.cpp +++ /dev/null @@ -1,358 +0,0 @@ -/* - * MIT License - * - * Copyright (c) PhotonVision - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "gtest/gtest.h" -#include "photon/PhotonUtils.h" -#include "photon/simulation/SimVisionSystem.h" - -class SimVisionSystemTest : public ::testing::Test { - void SetUp() override { - nt::NetworkTableInstance::GetDefault().StartServer(); - photon::PhotonCamera::SetVersionCheckEnabled(false); - } - - void TearDown() override {} -}; - -class SimVisionSystemTestWithParamsTest - : public SimVisionSystemTest, - public testing::WithParamInterface {}; -class SimVisionSystemTestDistanceParamsTest - : public SimVisionSystemTest, - public testing::WithParamInterface< - std::tuple> {}; - -TEST_F(SimVisionSystemTest, TestEmpty) { - photon::SimVisionSystem sys{ - "Test", 80.0_deg, frc::Transform3d{}, 99999_m, 320, 240, 0}; - SUCCEED(); -} - -TEST_F(SimVisionSystemTest, TestVisibilityCupidShuffle) { - const frc::Pose3d targetPose{ - {15.98_m, 0_m, 2_m}, - frc::Rotation3d{0_deg, 0_deg, units::constants::detail::PI_VAL * 1_rad}}; - photon::SimVisionSystem sys{ - "Test", 80.0_deg, frc::Transform3d{}, 99999_m, 640, 480, 0}; - sys.AddSimVisionTarget(photon::SimVisionTarget{targetPose, 1_m, 3_m, 3}); - - // To the right, to the right - frc::Pose2d robotPose{{5_m, 0_m}, frc::Rotation2d{-70_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_FALSE(sys.cam.GetLatestResult().HasTargets()); - - // To the right, to the right - robotPose = frc::Pose2d{{5_m, 0_m}, frc::Rotation2d{-95_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_FALSE(sys.cam.GetLatestResult().HasTargets()); - - // To the left, to the left - robotPose = frc::Pose2d{{5_m, 0_m}, frc::Rotation2d{90_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_FALSE(sys.cam.GetLatestResult().HasTargets()); - - // To the left, to the left - robotPose = frc::Pose2d{{5_m, 0_m}, frc::Rotation2d{65_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_FALSE(sys.cam.GetLatestResult().HasTargets()); - - // now kick, now kick - robotPose = frc::Pose2d{{2_m, 0_m}, frc::Rotation2d{5_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_TRUE(sys.cam.GetLatestResult().HasTargets()); - - // now kick, now kick - robotPose = frc::Pose2d{{2_m, 0_m}, frc::Rotation2d{-5_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_TRUE(sys.cam.GetLatestResult().HasTargets()); - - // now walk it by yourself - robotPose = frc::Pose2d{{2_m, 0_m}, frc::Rotation2d{-179_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_FALSE(sys.cam.GetLatestResult().HasTargets()); - - // now walk it by yourself - sys.MoveCamera(frc::Transform3d{ - frc::Translation3d{}, - frc::Rotation3d{0_rad, 0_rad, units::constants::detail::PI_VAL * 1_rad}}); - sys.ProcessFrame(robotPose); - ASSERT_TRUE(sys.cam.GetLatestResult().HasTargets()); -} - -TEST_F(SimVisionSystemTest, TestNotVisibleVertOne) { - const frc::Pose3d targetPose{ - {15.98_m, 0_m, 1_m}, - frc::Rotation3d{0_deg, 0_deg, units::constants::detail::PI_VAL * 1_rad}}; - photon::SimVisionSystem sys{ - "Test", 80.0_deg, frc::Transform3d{}, 99999_m, 640, 480, 0}; - sys.AddSimVisionTarget(photon::SimVisionTarget{targetPose, 1_m, 3_m, 3}); - - frc::Pose2d robotPose{{5_m, 0_m}, frc::Rotation2d{5_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_TRUE(sys.cam.GetLatestResult().HasTargets()); - - sys.MoveCamera(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 5000_m}, - frc::Rotation3d{0_rad, 0_rad, units::constants::detail::PI_VAL * 1_rad}}); - sys.ProcessFrame(robotPose); - ASSERT_FALSE(sys.cam.GetLatestResult().HasTargets()); -} - -// TEST_F(SimVisionSystemTest, TestNotVisibleVertTwo) { -// const frc::Pose3d targetPose{ -// {15.98_m, 0_m, 2_m}, -// frc::Rotation3d{0_deg, 0_deg, 
units::constants::detail::PI_VAL * -// 1_rad}}; -// frc::Transform3d robotToCamera{ -// frc::Translation3d{0_m, 0_m, 1_m}, -// frc::Rotation3d{0_deg, (units::constants::detail::PI_VAL / 4) * 1_rad, -// 0_deg}}; -// photon::SimVisionSystem sys{ -// "Test", 80.0_deg, robotToCamera.Inverse(), 99999_m, 1234, 1234, 0}; -// sys.AddSimVisionTarget( -// photon::SimVisionTarget{targetPose, 3_m, 0.5_m, 1736}); - -// frc::Pose2d robotPose{{14.98_m, 0_m}, frc::Rotation2d{5_deg}}; -// sys.ProcessFrame(robotPose); -// ASSERT_TRUE(sys.cam.GetLatestResult().HasTargets()); - -// robotPose = frc::Pose2d{frc::Translation2d{0_m, 0_m}, -// frc::Rotation2d{5_deg}}; sys.ProcessFrame(robotPose); -// ASSERT_FALSE(sys.cam.GetLatestResult().HasTargets()); -// } - -TEST_F(SimVisionSystemTest, TestNotVisibleTargetSize) { - const frc::Pose3d targetPose{ - {15.98_m, 0_m, 1_m}, - frc::Rotation3d{0_deg, 0_deg, units::constants::detail::PI_VAL * 1_rad}}; - photon::SimVisionSystem sys{ - "Test", 80.0_deg, frc::Transform3d{}, 99999_m, 640, 480, 20.0}; - sys.AddSimVisionTarget( - photon::SimVisionTarget{targetPose, 0.1_m, 0.025_m, 24}); - - frc::Pose2d robotPose{{12_m, 0_m}, frc::Rotation2d{5_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_TRUE(sys.cam.GetLatestResult().HasTargets()); - - robotPose = frc::Pose2d{frc::Translation2d{0_m, 0_m}, frc::Rotation2d{5_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_FALSE(sys.cam.GetLatestResult().HasTargets()); -} - -TEST_F(SimVisionSystemTest, TestNotVisibleTooFarForLEDs) { - const frc::Pose3d targetPose{ - {15.98_m, 0_m, 1_m}, - frc::Rotation3d{0_deg, 0_deg, units::constants::detail::PI_VAL * 1_rad}}; - photon::SimVisionSystem sys{"Test", 80.0_deg, frc::Transform3d{}, 10_m, 640, - 480, 1.0}; - sys.AddSimVisionTarget(photon::SimVisionTarget{targetPose, 1_m, 0.25_m, 78}); - - frc::Pose2d robotPose{{10_m, 0_m}, frc::Rotation2d{5_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_TRUE(sys.cam.GetLatestResult().HasTargets()); - - robotPose = frc::Pose2d{frc::Translation2d{0_m, 0_m}, frc::Rotation2d{5_deg}}; - sys.ProcessFrame(robotPose); - ASSERT_FALSE(sys.cam.GetLatestResult().HasTargets()); -} - -TEST_P(SimVisionSystemTestWithParamsTest, YawAngles) { - const frc::Pose3d targetPose{ - {15.98_m, 0_m, 0_m}, - frc::Rotation3d{0_deg, 0_deg, - (3 * units::constants::detail::PI_VAL / 4) * 1_rad}}; - frc::Pose2d robotPose{{10_m, 0_m}, frc::Rotation2d{GetParam() * -1.0}}; - photon::SimVisionSystem sys{ - "Test", 120.0_deg, frc::Transform3d{}, 99999_m, 640, 480, 0}; - sys.AddSimVisionTarget(photon::SimVisionTarget{targetPose, 0.5_m, 0.5_m, 23}); - - sys.ProcessFrame(robotPose); - - auto results = sys.cam.GetLatestResult(); - - ASSERT_TRUE(results.HasTargets()); - - photon::PhotonTrackedTarget target = results.GetBestTarget(); - - ASSERT_NEAR(GetParam().to(), target.GetYaw(), 0.0001); -} - -TEST_P(SimVisionSystemTestWithParamsTest, PitchAngles) { - const frc::Pose3d targetPose{ - {15.98_m, 0_m, 0_m}, - frc::Rotation3d{0_deg, 0_deg, - (3 * units::constants::detail::PI_VAL / 4) * 1_rad}}; - frc::Pose2d robotPose{{10_m, 0_m}, frc::Rotation2d{0_deg}}; - photon::SimVisionSystem sys{ - "Test", 120.0_deg, frc::Transform3d{}, 99999_m, 640, 480, 0}; - sys.AddSimVisionTarget(photon::SimVisionTarget{targetPose, 0.5_m, 0.5_m, 23}); - - sys.MoveCamera(frc::Transform3d{frc::Translation3d{}, - frc::Rotation3d{0_deg, GetParam(), 0_deg}}); - sys.ProcessFrame(robotPose); - - auto results = sys.cam.GetLatestResult(); - - ASSERT_TRUE(results.HasTargets()); - - photon::PhotonTrackedTarget target = results.GetBestTarget(); 
- - ASSERT_NEAR(GetParam().to(), target.GetPitch(), 0.0001); -} - -INSTANTIATE_TEST_SUITE_P(AnglesTests, SimVisionSystemTestWithParamsTest, - testing::Values(-10_deg, -5_deg, -0_deg, -1_deg, - -2_deg, 5_deg, 7_deg, 10.23_deg, - 20.21_deg, -19.999_deg)); - -TEST_P(SimVisionSystemTestDistanceParamsTest, DistanceCalc) { - units::foot_t distParam; - units::degree_t pitchParam; - units::foot_t heightParam; - std::tie(distParam, pitchParam, heightParam) = GetParam(); - - const frc::Pose3d targetPose{ - {15.98_m, 0_m, 1_m}, - frc::Rotation3d{0_deg, 0_deg, - (units::constants::detail::PI_VAL * 0.98) * 1_rad}}; - frc::Pose3d robotPose{{15.98_m - distParam, 0_m, 0_m}, frc::Rotation3d{}}; - frc::Transform3d robotToCamera{frc::Translation3d{0_m, 0_m, heightParam}, - frc::Rotation3d{0_deg, pitchParam, 0_deg}}; - photon::SimVisionSystem sys{ - "absurdlylongnamewhichshouldneveractuallyhappenbuteehwelltestitanywaysoho" - "wsyourdaygoingihopegoodhaveagreatrestofyourlife", - 160.0_deg, - robotToCamera.Inverse(), - 99999_m, - 640, - 480, - 0}; - sys.AddSimVisionTarget(photon::SimVisionTarget{targetPose, 0.5_m, 0.5_m, 0}); - sys.ProcessFrame(robotPose); - - auto results = sys.cam.GetLatestResult(); - - ASSERT_TRUE(results.HasTargets()); - - photon::PhotonTrackedTarget target = results.GetBestTarget(); - - ASSERT_NEAR(target.GetYaw(), 0.0, 0.0001); - - units::meter_t dist = photon::PhotonUtils::CalculateDistanceToTarget( - robotToCamera.Z(), targetPose.Z(), pitchParam, - units::degree_t{target.GetPitch()}); - ASSERT_NEAR(dist.to(), - distParam.convert().to(), 0.001); -} - -INSTANTIATE_TEST_SUITE_P( - DistanceParamsTests, SimVisionSystemTestDistanceParamsTest, - testing::Values(std::make_tuple(5_ft, 15.98_deg, 0_ft), - std::make_tuple(6_ft, 15.98_deg, 1_ft), - std::make_tuple(10_ft, 15.98_deg, 0_ft), - std::make_tuple(15_ft, 15.98_deg, 2_ft), - std::make_tuple(19.95_ft, 15.98_deg, 0_ft), - std::make_tuple(20_ft, 15.98_deg, 0_ft), - std::make_tuple(5_ft, 42_deg, 1_ft), - std::make_tuple(6_ft, 42_deg, 0_ft), - std::make_tuple(10_ft, 42_deg, 2_ft), - std::make_tuple(15_ft, 42_deg, 0.5_ft), - std::make_tuple(19.42_ft, 15.98_deg, 0_ft), - std::make_tuple(20_ft, 42_deg, 0_ft), - std::make_tuple(5_ft, 55_deg, 2_ft), - std::make_tuple(6_ft, 55_deg, 0_ft), - std::make_tuple(10_ft, 54_deg, 2.2_ft), - std::make_tuple(15_ft, 53_deg, 0_ft), - std::make_tuple(19.52_ft, 15.98_deg, 1.1_ft))); - -TEST_F(SimVisionSystemTest, TestMultipleTargets) { - const frc::Pose3d targetPoseL{ - {15.98_m, 2_m, 0_m}, - frc::Rotation3d{0_deg, 0_deg, units::constants::detail::PI_VAL * 1_rad}}; - const frc::Pose3d targetPoseC{ - {15.98_m, 0_m, 0_m}, - frc::Rotation3d{0_deg, 0_deg, units::constants::detail::PI_VAL * 1_rad}}; - const frc::Pose3d targetPoseR{ - {15.98_m, -2_m, 0_m}, - frc::Rotation3d{0_deg, 0_deg, units::constants::detail::PI_VAL * 1_rad}}; - photon::SimVisionSystem sys{ - "Test", 160.0_deg, frc::Transform3d{}, 99999_m, 640, 480, 20.0}; - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseL.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 0_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 1}); - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseC.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 0_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 2}); - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseR.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 0_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 3}); - - sys.AddSimVisionTarget(photon::SimVisionTarget{ - 
targetPoseL.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 1_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 4}); - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseC.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 1_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 5}); - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseR.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 1_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 6}); - - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseL.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 0.5_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 7}); - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseC.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 0.5_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 8}); - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseL.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 0.75_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 9}); - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseR.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 0.75_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 10}); - sys.AddSimVisionTarget(photon::SimVisionTarget{ - targetPoseL.TransformBy(frc::Transform3d{ - frc::Translation3d{0_m, 0_m, 0.25_m}, frc::Rotation3d{}}), - 0.25_m, 0.25_m, 11}); - - frc::Pose2d robotPose{{6_m, 0_m}, frc::Rotation2d{.25_deg}}; - sys.ProcessFrame(robotPose); - - auto results = sys.cam.GetLatestResult(); - - ASSERT_TRUE(results.HasTargets()); - - auto targetList = results.GetTargets(); - - ASSERT_EQ(targetList.size(), size_t(11)); -} diff --git a/photon-lib/src/test/native/cpp/VisionSystemSimTest.cpp b/photon-lib/src/test/native/cpp/VisionSystemSimTest.cpp index 667b6f53af..d04c5f2fa7 100644 --- a/photon-lib/src/test/native/cpp/VisionSystemSimTest.cpp +++ b/photon-lib/src/test/native/cpp/VisionSystemSimTest.cpp @@ -22,10 +22,15 @@ * SOFTWARE. 
*/ +#include + #include "gtest/gtest.h" #include "photon/PhotonUtils.h" #include "photon/simulation/VisionSystemSim.h" +// Ignore GetLatestResult warnings +WPI_IGNORE_DEPRECATED + class VisionSystemSimTest : public ::testing::Test { void SetUp() override { nt::NetworkTableInstance::GetDefault().StartServer(); @@ -224,9 +229,10 @@ TEST_P(VisionSystemSimTestWithParamsTest, YawAngles) { robotPose = frc::Pose2d{frc::Translation2d{10_m, 0_m}, frc::Rotation2d{GetParam()}}; visionSysSim.Update(robotPose); - ASSERT_TRUE(camera.GetLatestResult().HasTargets()); - ASSERT_NEAR(GetParam().to(), - camera.GetLatestResult().GetBestTarget().GetYaw(), 0.25); + + const auto result = camera.GetLatestResult(); + ASSERT_TRUE(result.HasTargets()); + ASSERT_NEAR(GetParam().to(), result.GetBestTarget().GetYaw(), 0.25); } TEST_P(VisionSystemSimTestWithParamsTest, PitchAngles) { @@ -251,9 +257,10 @@ TEST_P(VisionSystemSimTestWithParamsTest, PitchAngles) { frc::Translation3d{}, frc::Rotation3d{0_rad, units::degree_t{GetParam()}, 0_rad}}); visionSysSim.Update(robotPose); - ASSERT_TRUE(camera.GetLatestResult().HasTargets()); - ASSERT_NEAR(GetParam().to(), - camera.GetLatestResult().GetBestTarget().GetPitch(), 0.25); + + const auto result = camera.GetLatestResult(); + ASSERT_TRUE(result.HasTargets()); + ASSERT_NEAR(GetParam().to(), result.GetBestTarget().GetPitch(), 0.25); } INSTANTIATE_TEST_SUITE_P(AnglesTests, VisionSystemSimTestWithParamsTest, diff --git a/photon-targeting/src/main/java/org/photonvision/common/networktables/PacketSubscriber.java b/photon-targeting/src/main/java/org/photonvision/common/networktables/PacketSubscriber.java index 8cc9f44545..964c1c3661 100644 --- a/photon-targeting/src/main/java/org/photonvision/common/networktables/PacketSubscriber.java +++ b/photon-targeting/src/main/java/org/photonvision/common/networktables/PacketSubscriber.java @@ -18,33 +18,84 @@ package org.photonvision.common.networktables; import edu.wpi.first.networktables.RawSubscriber; +import java.util.ArrayList; +import java.util.List; import org.photonvision.common.dataflow.structures.Packet; import org.photonvision.common.dataflow.structures.PacketSerde; public class PacketSubscriber implements AutoCloseable { + public static class PacketResult { + public final U value; + public final long timestamp; + + public PacketResult(U value, long timestamp) { + this.value = value; + this.timestamp = timestamp; + } + + public PacketResult() { + this(null, 0); + } + } + public final RawSubscriber subscriber; private final PacketSerde serde; - private final T defaultValue; private final Packet packet = new Packet(1); - public PacketSubscriber(RawSubscriber subscriber, PacketSerde serde, T defaultValue) { + /** + * Create a PacketSubscriber + * + * @param subscriber NT subscriber. Set pollStorage to 1 to make get() faster + * @param serde How we convert raw to actual things + */ + public PacketSubscriber(RawSubscriber subscriber, PacketSerde serde) { this.subscriber = subscriber; this.serde = serde; - this.defaultValue = defaultValue; } - public T get() { + /** Parse one chunk of timestamped data into T */ + private PacketResult parse(byte[] data, long timestamp) { packet.clear(); + packet.setData(data); + if (packet.getSize() < 1) { + return new PacketResult(); + } + + return new PacketResult<>(serde.unpack(packet), timestamp); + } - packet.setData(subscriber.get(new byte[] {})); - if (packet.getSize() < 1) return defaultValue; + /** + * Get the latest value sent over NT. 
If the value has never been set, returns an empty
+   * PacketResult
+   */
+  public PacketResult<T> get() {
+    // Get /all/ changes since last call to readQueue
+    var data = subscriber.getAtomic();
 
-    packet.setData(subscriber.get(new byte[] {}));
-    if (packet.getSize() < 1) return defaultValue;
+    // Topic has never been published to?
+    if (data.timestamp == 0) {
+      return new PacketResult<>();
+    }
 
-    return serde.unpack(packet);
+    return parse(data.value, data.timestamp);
   }
 
   @Override
   public void close() {
     subscriber.close();
   }
+
+  public List<PacketResult<T>> getAllChanges() {
+    List<PacketResult<T>> ret = new ArrayList<>();
+
+    // Get /all/ changes since last call to readQueue
+    var changes = subscriber.readQueue();
+
+    for (var change : changes) {
+      ret.add(parse(change.value, change.timestamp));
+    }
+
+    return ret;
+  }
 }
diff --git a/photon-targeting/src/main/native/include/photon/estimation/OpenCVHelp.h b/photon-targeting/src/main/native/include/photon/estimation/OpenCVHelp.h
index 2d79e661e0..f990bf01fc 100644
--- a/photon-targeting/src/main/native/include/photon/estimation/OpenCVHelp.h
+++ b/photon-targeting/src/main/native/include/photon/estimation/OpenCVHelp.h
@@ -51,6 +51,7 @@ static std::vector<cv::Point2f> GetConvexHull(
     return convexPoints;
 }
 
+[[maybe_unused]]
 static cv::RotatedRect GetMinAreaRect(const std::vector<cv::Point2f>& points) {
   return cv::minAreaRect(points);
 }
@@ -120,6 +121,7 @@ static std::vector RotationToRVec(
   return cv::boundingRect(points);
 }
 
+[[maybe_unused]]
 static std::vector<cv::Point2f> ProjectPoints(
     const Eigen::Matrix<double, 3, 3>& cameraMatrix,
     const Eigen::Matrix& distCoeffs,
diff --git a/photon-targeting/src/main/native/include/photon/estimation/VisionEstimation.h b/photon-targeting/src/main/native/include/photon/estimation/VisionEstimation.h
index cb3ae3bdc5..56fb6b8776 100644
--- a/photon-targeting/src/main/native/include/photon/estimation/VisionEstimation.h
+++ b/photon-targeting/src/main/native/include/photon/estimation/VisionEstimation.h
@@ -32,6 +32,7 @@ namespace photon {
 namespace VisionEstimation {
 
+[[maybe_unused]]
 static std::vector<frc::AprilTag> GetVisibleLayoutTags(
     const std::vector<PhotonTrackedTarget>& visTags,
     const frc::AprilTagFieldLayout& layout) {
diff --git a/photonlib-cpp-examples/apriltagExample/src/main/include/PhotonCameraWrapper.h b/photonlib-cpp-examples/apriltagExample/src/main/include/PhotonCameraWrapper.h
index 4cb11e2e0c..b63362eee1 100644
--- a/photonlib-cpp-examples/apriltagExample/src/main/include/PhotonCameraWrapper.h
+++ b/photonlib-cpp-examples/apriltagExample/src/main/include/PhotonCameraWrapper.h
@@ -33,15 +33,21 @@
 #include 
 
 class PhotonCameraWrapper {
+ private:
+  photon::PhotonCamera camera{"WPI2023"};
+
  public:
   photon::PhotonPoseEstimator m_poseEstimator{
       frc::LoadAprilTagLayoutField(frc::AprilTagField::k2023ChargedUp),
-      photon::MULTI_TAG_PNP_ON_RIO, std::move(photon::PhotonCamera{"WPI2023"}),
-      frc::Transform3d{}};
+      photon::MULTI_TAG_PNP_ON_RIO, frc::Transform3d{}};
 
   inline std::optional<photon::EstimatedRobotPose> Update(
       frc::Pose2d estimatedPose) {
     m_poseEstimator.SetReferencePose(frc::Pose3d(estimatedPose));
-    return m_poseEstimator.Update();
+    std::optional<photon::EstimatedRobotPose> ret = std::nullopt;
+    for (const auto& change : camera.GetAllUnreadResults())
+      ret = m_poseEstimator.Update(change);
+
+    return ret;
   }
 };
diff --git a/photonlib-java-examples/swervedriveposeestsim/src/main/java/frc/robot/Vision.java b/photonlib-java-examples/swervedriveposeestsim/src/main/java/frc/robot/Vision.java
index d1e88c9258..f156099cd6 100644
--- a/photonlib-java-examples/swervedriveposeestsim/src/main/java/frc/robot/Vision.java
+++ b/photonlib-java-examples/swervedriveposeestsim/src/main/java/frc/robot/Vision.java
@@ -46,7 +46,6 @@ public class Vision {
     private final PhotonCamera
camera; private final PhotonPoseEstimator photonEstimator; - private double lastEstTimestamp = 0; // Simulation private PhotonCameraSim cameraSim; @@ -56,8 +55,7 @@ public Vision() { camera = new PhotonCamera(kCameraName); photonEstimator = - new PhotonPoseEstimator( - kTagLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, camera, kRobotToCam); + new PhotonPoseEstimator(kTagLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, kRobotToCam); photonEstimator.setMultiTagFallbackStrategy(PoseStrategy.LOWEST_AMBIGUITY); // ----- Simulation @@ -95,20 +93,21 @@ public PhotonPipelineResult getLatestResult() { * used for estimation. */ public Optional getEstimatedGlobalPose() { - var visionEst = photonEstimator.update(); - double latestTimestamp = camera.getLatestResult().getTimestampSeconds(); - boolean newResult = Math.abs(latestTimestamp - lastEstTimestamp) > 1e-5; - if (Robot.isSimulation()) { - visionEst.ifPresentOrElse( - est -> - getSimDebugField() - .getObject("VisionEstimation") - .setPose(est.estimatedPose.toPose2d()), - () -> { - if (newResult) getSimDebugField().getObject("VisionEstimation").setPoses(); - }); + Optional visionEst = Optional.empty(); + for (var change : camera.getAllUnreadResults()) { + visionEst = photonEstimator.update(change); + + if (Robot.isSimulation()) { + visionEst.ifPresentOrElse( + est -> + getSimDebugField() + .getObject("VisionEstimation") + .setPose(est.estimatedPose.toPose2d()), + () -> { + getSimDebugField().getObject("VisionEstimation").setPoses(); + }); + } } - if (newResult) lastEstTimestamp = latestTimestamp; return visionEst; } diff --git a/photonlib-java-examples/swervedriveposeestsim/src/main/java/frc/robot/subsystems/drivetrain/SwerveDriveSim.java b/photonlib-java-examples/swervedriveposeestsim/src/main/java/frc/robot/subsystems/drivetrain/SwerveDriveSim.java index aff69fc703..85849141d4 100644 --- a/photonlib-java-examples/swervedriveposeestsim/src/main/java/frc/robot/subsystems/drivetrain/SwerveDriveSim.java +++ b/photonlib-java-examples/swervedriveposeestsim/src/main/java/frc/robot/subsystems/drivetrain/SwerveDriveSim.java @@ -24,6 +24,7 @@ package frc.robot.subsystems.drivetrain; +import edu.wpi.first.math.MatBuilder; import edu.wpi.first.math.MathUtil; import edu.wpi.first.math.Matrix; import edu.wpi.first.math.Nat; @@ -114,18 +115,18 @@ public SwerveDriveSim( SwerveDriveKinematics kinematics) { this( new LinearSystem( - Matrix.mat(Nat.N2(), Nat.N2()).fill(0.0, 1.0, 0.0, -driveFF.kv / driveFF.ka), + MatBuilder.fill(Nat.N2(), Nat.N2(), 0.0, 1.0, 0.0, -driveFF.kv / driveFF.ka), VecBuilder.fill(0.0, 1.0 / driveFF.ka), - Matrix.mat(Nat.N2(), Nat.N2()).fill(1.0, 0.0, 0.0, 1.0), + MatBuilder.fill(Nat.N2(), Nat.N2(), 1.0, 0.0, 0.0, 1.0), VecBuilder.fill(0.0, 0.0)), driveFF.ks, driveMotor, driveGearing, driveWheelRadius, new LinearSystem( - Matrix.mat(Nat.N2(), Nat.N2()).fill(0.0, 1.0, 0.0, -steerFF.kv / steerFF.ka), + MatBuilder.fill(Nat.N2(), Nat.N2(), 0.0, 1.0, 0.0, -steerFF.kv / steerFF.ka), VecBuilder.fill(0.0, 1.0 / steerFF.ka), - Matrix.mat(Nat.N2(), Nat.N2()).fill(1.0, 0.0, 0.0, 1.0), + MatBuilder.fill(Nat.N2(), Nat.N2(), 1.0, 0.0, 0.0, 1.0), VecBuilder.fill(0.0, 0.0)), steerFF.ks, steerMotor, diff --git a/shared/config.gradle b/shared/config.gradle index 4044d7e3ff..e6fde7f46b 100644 --- a/shared/config.gradle +++ b/shared/config.gradle @@ -33,6 +33,9 @@ model { binaries { withType(NativeBinarySpec).all { nativeUtils.usePlatformArguments(it) + if (it.toolChain instanceof GccCompatibleToolChain) { + it.cppCompiler.args << 
"-Wno-deprecated-enum-enum-conversion" + } } } } From f81e466a139c719073d35e69ae5eb82fcddd90be Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 10 May 2024 11:18:24 -0700 Subject: [PATCH 02/10] Fix compile errors --- .../java/org/photonvision/PhotonCamera.java | 1 - .../main/native/cpp/photon/PhotonCamera.cpp | 2 +- .../cpp/photon/simulation/PhotonCameraSim.cpp | 5 ++-- .../native/cpp/PhotonPoseEstimatorTest.cpp | 24 +++++++++---------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/photon-lib/src/main/java/org/photonvision/PhotonCamera.java b/photon-lib/src/main/java/org/photonvision/PhotonCamera.java index f1508240f4..a0139304b5 100644 --- a/photon-lib/src/main/java/org/photonvision/PhotonCamera.java +++ b/photon-lib/src/main/java/org/photonvision/PhotonCamera.java @@ -44,7 +44,6 @@ import edu.wpi.first.networktables.PubSubOption; import edu.wpi.first.networktables.StringSubscriber; import edu.wpi.first.wpilibj.DriverStation; -import edu.wpi.first.wpilibj.RobotController; import edu.wpi.first.wpilibj.Timer; import java.util.ArrayList; import java.util.List; diff --git a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp index 2821c996cf..9e418dbe2d 100644 --- a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp +++ b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp @@ -157,7 +157,7 @@ std::vector PhotonCamera::GetAllUnreadResults() { PhotonPipelineResult& result = ret[i]; packet >> result; - result.SetTimestamp(units::microsecond_t(value.time) - result.GetLatency()); + result.SetRecieveTimestamp(units::microsecond_t(value.time) - result.GetLatency()); } return ret; diff --git a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp index 0b10023ec6..a3e57f2b75 100644 --- a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp +++ b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp @@ -326,7 +326,8 @@ PhotonPipelineResult PhotonCameraSim::Process( multiTagResults = MultiTargetPNPResult{pnpResult, usedIds}; } - return PhotonPipelineResult{latency, detectableTgts, multiTagResults}; + heartbeatCounter++; + return PhotonPipelineResult{heartbeatCounter, 0_s, latency, detectableTgts, multiTagResults}; } void PhotonCameraSim::SubmitProcessedFrame(const PhotonPipelineResult& result) { SubmitProcessedFrame(result, wpi::Now()); @@ -376,7 +377,7 @@ void PhotonCameraSim::SubmitProcessedFrame(const PhotonPipelineResult& result, distortion.data() + distortion.size()}; ts.cameraDistortionPublisher.Set(distortionView, recieveTimestamp); - ts.heartbeatPublisher.Set(heartbeatCounter++, recieveTimestamp); + ts.heartbeatPublisher.Set(heartbeatCounter, recieveTimestamp); ts.subTable->GetInstance().Flush(); } diff --git a/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp b/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp index 4ca00d4fc9..f42fad8b34 100644 --- a/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp +++ b/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp @@ -84,7 +84,7 @@ TEST(PhotonPoseEstimatorTest, LowestAmbiguityStrategy) { cameraOne.test = true; cameraOne.testResult = {{0, 0_s, 2_ms, targets}}; - cameraOne.testResult[0].SetTimestamp(units::second_t(11)); + cameraOne.testResult[0].SetRecieveTimestamp(units::second_t(11)); photon::PhotonPoseEstimator estimator(aprilTags, photon::LOWEST_AMBIGUITY, frc::Transform3d{}); @@ -143,7 +143,7 @@ 
TEST(PhotonPoseEstimatorTest, ClosestToCameraHeightStrategy) { cameraOne.test = true; cameraOne.testResult = {{0, 0_s, 2_ms, targets}}; - cameraOne.testResult[0].SetTimestamp(17_s); + cameraOne.testResult[0].SetRecieveTimestamp(17_s); photon::PhotonPoseEstimator estimator( aprilTags, photon::CLOSEST_TO_CAMERA_HEIGHT, {{0_m, 0_m, 4_m}, {}}); @@ -190,7 +190,7 @@ TEST(PhotonPoseEstimatorTest, ClosestToReferencePoseStrategy) { cameraOne.test = true; cameraOne.testResult = {{0, 0_s, 2_ms, targets}}; - cameraOne.testResult[0].SetTimestamp(units::second_t(17)); + cameraOne.testResult[0].SetRecieveTimestamp(units::second_t(17)); photon::PhotonPoseEstimator estimator(aprilTags, photon::CLOSEST_TO_REFERENCE_POSE, {}); @@ -239,7 +239,7 @@ TEST(PhotonPoseEstimatorTest, ClosestToLastPose) { cameraOne.test = true; cameraOne.testResult = {{0, 0_s, 2_ms, targets}}; - cameraOne.testResult[0].SetTimestamp(units::second_t(17)); + cameraOne.testResult[0].SetRecieveTimestamp(units::second_t(17)); photon::PhotonPoseEstimator estimator(aprilTags, photon::CLOSEST_TO_LAST_POSE, {}); @@ -277,8 +277,8 @@ TEST(PhotonPoseEstimatorTest, ClosestToLastPose) { frc::Rotation3d(0_rad, 0_rad, 0_rad)), 0.4, corners, detectedCorners}}; - cameraOne.testResult = {{0,0_s, 2_ms, targetsThree}}; - cameraOne.testResult[0].SetTimestamp(units::second_t(21)); + cameraOne.testResult = {{0, 0_s, 2_ms, targetsThree}}; + cameraOne.testResult[0].SetRecieveTimestamp(units::second_t(21)); // std::optional estimatedPose; for (const auto& result : cameraOne.GetAllUnreadResults()) { @@ -322,8 +322,8 @@ TEST(PhotonPoseEstimatorTest, AverageBestPoses) { 0.4, corners, detectedCorners}}; cameraOne.test = true; - cameraOne.testResult = {{0,0_s, 2_ms, targets}}; - cameraOne.testResult[0].SetTimestamp(units::second_t(15)); + cameraOne.testResult = {{0, 0_s, 2_ms, targets}}; + cameraOne.testResult[0].SetRecieveTimestamp(units::second_t(15)); photon::PhotonPoseEstimator estimator(aprilTags, photon::AVERAGE_BEST_TARGETS, {}); @@ -374,8 +374,8 @@ TEST(PhotonPoseEstimatorTest, PoseCache) { {}); // empty input, expect empty out - cameraOne.testResult = {{0,0_s, 2_ms, {}}}; - cameraOne.testResult[0].SetTimestamp(units::second_t(1)); + cameraOne.testResult = {{0, 0_s, 2_ms, {}}}; + cameraOne.testResult[0].SetRecieveTimestamp(units::second_t(1)); std::optional estimatedPose; for (const auto& result : cameraOne.GetAllUnreadResults()) { @@ -385,8 +385,8 @@ TEST(PhotonPoseEstimatorTest, PoseCache) { EXPECT_FALSE(estimatedPose); // Set result, and update -- expect present and timestamp to be 15 - cameraOne.testResult = {{0,0_s, 3_ms, {}}}; - cameraOne.testResult[0].SetTimestamp(units::second_t(15)); + cameraOne.testResult = {{0, 0_s, 3_ms, {}}}; + cameraOne.testResult[0].SetRecieveTimestamp(units::second_t(15)); for (const auto& result : cameraOne.GetAllUnreadResults()) { estimatedPose = estimator.Update(result); From 819a5aeeb782c1384a589def28cf4f225383c4c8 Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 10 May 2024 11:21:03 -0700 Subject: [PATCH 03/10] Update PhotonPoseEstimatorTest.cpp --- photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp b/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp index f42fad8b34..90f53b1f84 100644 --- a/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp +++ b/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp @@ -385,7 +385,7 @@ TEST(PhotonPoseEstimatorTest, PoseCache) { 
EXPECT_FALSE(estimatedPose); // Set result, and update -- expect present and timestamp to be 15 - cameraOne.testResult = {{0, 0_s, 3_ms, {}}}; + cameraOne.testResult = {{0, 0_s, 3_ms, targets}}; cameraOne.testResult[0].SetRecieveTimestamp(units::second_t(15)); for (const auto& result : cameraOne.GetAllUnreadResults()) { From 0f5013530ebf6fbb409f8171934c7c6cbba2d246 Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 10 May 2024 11:23:00 -0700 Subject: [PATCH 04/10] Run lint --- photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp | 3 ++- .../src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp index 9e418dbe2d..7e89407891 100644 --- a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp +++ b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp @@ -157,7 +157,8 @@ std::vector PhotonCamera::GetAllUnreadResults() { PhotonPipelineResult& result = ret[i]; packet >> result; - result.SetRecieveTimestamp(units::microsecond_t(value.time) - result.GetLatency()); + result.SetRecieveTimestamp(units::microsecond_t(value.time) - + result.GetLatency()); } return ret; diff --git a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp index a3e57f2b75..22075132d4 100644 --- a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp +++ b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp @@ -327,7 +327,8 @@ PhotonPipelineResult PhotonCameraSim::Process( } heartbeatCounter++; - return PhotonPipelineResult{heartbeatCounter, 0_s, latency, detectableTgts, multiTagResults}; + return PhotonPipelineResult{heartbeatCounter, 0_s, latency, detectableTgts, + multiTagResults}; } void PhotonCameraSim::SubmitProcessedFrame(const PhotonPipelineResult& result) { SubmitProcessedFrame(result, wpi::Now()); From 6c9f0479eca7d0d6b39c94c0a02672c82ab33a1b Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 10 May 2024 11:51:38 -0700 Subject: [PATCH 05/10] Update Vision.h --- .../src/main/include/Vision.h | 46 ++++++++++--------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/photonlib-cpp-examples/swervedriveposeestsim/src/main/include/Vision.h b/photonlib-cpp-examples/swervedriveposeestsim/src/main/include/Vision.h index 0caeae102c..0253f5c4cd 100644 --- a/photonlib-cpp-examples/swervedriveposeestsim/src/main/include/Vision.h +++ b/photonlib-cpp-examples/swervedriveposeestsim/src/main/include/Vision.h @@ -58,37 +58,37 @@ class Vision { cameraProp->SetAvgLatency(50_ms); cameraProp->SetLatencyStdDev(15_ms); - cameraSim = std::make_shared(camera.get(), - *cameraProp.get()); + cameraSim = + std::make_shared(&camera, *cameraProp.get()); visionSim->AddCamera(cameraSim.get(), constants::Vision::kRobotToCam); cameraSim->EnableDrawWireframe(true); } } - photon::PhotonPipelineResult GetLatestResult() { - return camera->GetLatestResult(); - } + photon::PhotonPipelineResult GetLatestResult() { return m_latestResult; } std::optional GetEstimatedGlobalPose() { - auto visionEst = photonEstimator.Update(); - units::second_t latestTimestamp = camera->GetLatestResult().GetTimestamp(); - bool newResult = - units::math::abs(latestTimestamp - lastEstTimestamp) > 1e-5_s; - if (frc::RobotBase::IsSimulation()) { - if (visionEst.has_value()) { - GetSimDebugField() - .GetObject("VisionEstimation") - 
->SetPose(visionEst.value().estimatedPose.ToPose2d()); - } else { - if (newResult) { + std::optional visionEst; + + // Run each new pipeline result through our pose estimator + for (const auto& result : camera.GetAllUnreadResults()) { + // cache result and update pose estimator + auto visionEst = photonEstimator.Update(result); + m_latestResult = result; + + // In sim only, add our vision estimate to the sim debug field + if (frc::RobotBase::IsSimulation()) { + if (visionEst.has_value()) { + GetSimDebugField() + .GetObject("VisionEstimation") + ->SetPose(visionEst.value().estimatedPose.ToPose2d()); + } else { GetSimDebugField().GetObject("VisionEstimation")->SetPoses({}); } } } - if (newResult) { - lastEstTimestamp = latestTimestamp; - } + return visionEst; } @@ -141,10 +141,12 @@ class Vision { photon::PhotonPoseEstimator photonEstimator{ constants::Vision::kTagLayout, photon::PoseStrategy::MULTI_TAG_PNP_ON_COPROCESSOR, - photon::PhotonCamera{"photonvision"}, constants::Vision::kRobotToCam}; - std::shared_ptr camera{photonEstimator.GetCamera()}; + constants::Vision::kRobotToCam}; + photon::PhotonCamera camera{"photonvision"}; std::unique_ptr visionSim; std::unique_ptr cameraProp; std::shared_ptr cameraSim; - units::second_t lastEstTimestamp{0_s}; + + // The most recent result, cached for calculating std devs + photon::PhotonPipelineResult m_latestResult; }; From f0b6de54e60f1ac0110242587a7881c827981c71 Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 10 May 2024 11:56:46 -0700 Subject: [PATCH 06/10] Fix sim by hacking things in --- .../main/native/cpp/photon/simulation/PhotonCameraSim.cpp | 5 +++-- .../main/native/include/photon/simulation/VisionTargetSim.h | 3 +++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp index 22075132d4..f68bc887d1 100644 --- a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp +++ b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp @@ -217,11 +217,12 @@ PhotonPipelineResult PhotonCameraSim::Process( std::vector> cornersDouble{cornersFloat.begin(), cornersFloat.end()}; - detectableTgts.emplace_back(PhotonTrackedTarget{ + detectableTgts.emplace_back( -centerRot.Z().convert().to(), -centerRot.Y().convert().to(), areaPercent, centerRot.X().convert().to(), tgt.fiducialId, - pnpSim.best, pnpSim.alt, pnpSim.ambiguity, smallVec, cornersDouble}); + tgt.objDetClassId, tgt.objDetConf, pnpSim.best, pnpSim.alt, + pnpSim.ambiguity, smallVec, cornersDouble); } if (videoSimRawEnabled) { diff --git a/photon-lib/src/main/native/include/photon/simulation/VisionTargetSim.h b/photon-lib/src/main/native/include/photon/simulation/VisionTargetSim.h index f8d4d6676b..a0d04ab789 100644 --- a/photon-lib/src/main/native/include/photon/simulation/VisionTargetSim.h +++ b/photon-lib/src/main/native/include/photon/simulation/VisionTargetSim.h @@ -46,6 +46,9 @@ class VisionTargetSim { } int fiducialId; + int objDetClassId = -1; + float objDetConf = -1; + bool operator<(const VisionTargetSim& right) const { return pose.Translation().Norm() < right.pose.Translation().Norm(); } From 56ea44dd4fa373d66df19ba1fbedcd30f1316ff6 Mon Sep 17 00:00:00 2001 From: Matt Date: Sat, 3 Aug 2024 20:47:39 -0700 Subject: [PATCH 07/10] run lint --- .../main/native/cpp/photon/simulation/PhotonCameraSim.cpp | 5 +++++ .../native/cpp/photon/simulation/SimCameraProperties.cpp | 4 ++++ 
photon-lib/src/test/native/cpp/VisionSystemSimTest.cpp | 3 ++- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp index f68bc887d1..381e99c9c1 100644 --- a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp +++ b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp @@ -24,6 +24,11 @@ #include "photon/simulation/PhotonCameraSim.h" +#include +#include +#include +#include + namespace photon { PhotonCameraSim::PhotonCameraSim(PhotonCamera* camera) : PhotonCameraSim(camera, photon::SimCameraProperties::PERFECT_90DEG()) {} diff --git a/photon-lib/src/main/native/cpp/photon/simulation/SimCameraProperties.cpp b/photon-lib/src/main/native/cpp/photon/simulation/SimCameraProperties.cpp index b8ea27d38e..1133a60891 100644 --- a/photon-lib/src/main/native/cpp/photon/simulation/SimCameraProperties.cpp +++ b/photon-lib/src/main/native/cpp/photon/simulation/SimCameraProperties.cpp @@ -24,6 +24,10 @@ #include "photon/simulation/SimCameraProperties.h" +#include +#include +#include + using namespace photon; void SimCameraProperties::SetCalibration(int width, int height, diff --git a/photon-lib/src/test/native/cpp/VisionSystemSimTest.cpp b/photon-lib/src/test/native/cpp/VisionSystemSimTest.cpp index 2b54fd5505..e223512dd6 100644 --- a/photon-lib/src/test/native/cpp/VisionSystemSimTest.cpp +++ b/photon-lib/src/test/native/cpp/VisionSystemSimTest.cpp @@ -22,12 +22,13 @@ * SOFTWARE. */ -#include #include #include #include #include +#include + #include "gtest/gtest.h" #include "photon/PhotonUtils.h" #include "photon/simulation/VisionSystemSim.h" From 8149be18e6469cce0dea1cfc42b2faefbd409d3a Mon Sep 17 00:00:00 2001 From: Matt Date: Sat, 3 Aug 2024 21:53:40 -0700 Subject: [PATCH 08/10] Clarify docstrings --- .../java/org/photonvision/PhotonCamera.java | 2 ++ .../org/photonvision/PhotonPoseEstimator.java | 36 ++++++++++++------- .../main/native/cpp/photon/PhotonCamera.cpp | 2 ++ 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/photon-lib/src/main/java/org/photonvision/PhotonCamera.java b/photon-lib/src/main/java/org/photonvision/PhotonCamera.java index efb0d8c51b..38980d788e 100644 --- a/photon-lib/src/main/java/org/photonvision/PhotonCamera.java +++ b/photon-lib/src/main/java/org/photonvision/PhotonCamera.java @@ -189,6 +189,8 @@ public List getAllUnreadResults() { var changes = resultSubscriber.getAllChanges(); + // TODO: NT4 timestamps are still not to be trusted. But it's the best we can do until we can + // make time sync more reliable. for (var c : changes) { var result = c.value; result.setRecieveTimestampMicros(c.timestamp); diff --git a/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java b/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java index d0a37db0d9..ca43fca4bb 100644 --- a/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java +++ b/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java @@ -284,9 +284,11 @@ public void setRobotToCameraTransform(Transform3d robotToCamera) { *

  • The timestamp of the provided pipeline result is the same as in the previous call to * {@code update()}. *
  • No targets were found in the pipeline results. - *
  • Strategy is multi-tag on coprocessor * * + * Will report a warning if the strategy is multi-tag-on-rio but camera calibration data is not + * provided. + * * @param cameraResult The latest pipeline result from the camera * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to * create the estimate. @@ -304,10 +306,10 @@ public Optional update(PhotonPipelineResult cameraResult) { *
  • No targets were found in the pipeline results. * * - * @param cameraMatrix Camera calibration data that can be used in the case of no assigned - * PhotonCamera. - * @param distCoeffs Camera calibration data that can be used in the case of no assigned - * PhotonCamera + * @param cameraMatrix Camera calibration data for multi-tag-on-rio strategy - can be empty + * otherwise + * @param distCoeffs Camera calibration data for multi-tag-on-rio strategy - can be empty + * otherwise * @return an {@link EstimatedRobotPose} with an estimated pose, timestamp, and targets used to * create the estimate. */ @@ -344,7 +346,7 @@ private Optional update( Optional> cameraMatrix, Optional> distCoeffs, PoseStrategy strat) { - Optional estimatedPose; + Optional estimatedPose = Optional.empty(); switch (strat) { case LOWEST_AMBIGUITY: estimatedPose = lowestAmbiguityStrategy(cameraResult); break; @@ -363,10 +365,20 @@ private Optional update( estimatedPose = averageBestTargetsStrategy(cameraResult); break; case MULTI_TAG_PNP_ON_RIO: - estimatedPose = multiTagOnRioStrategy(cameraResult, cameraMatrix, distCoeffs); + if (cameraMatrix.isEmpty()) { + DriverStation.reportWarning( + "Camera matrix is empty for multi-tag-on-rio", + Thread.currentThread().getStackTrace()); + } else if (distCoeffs.isEmpty()) { + DriverStation.reportWarning( + "Distortion coefficients are empty for multi-tag-on-rio", + Thread.currentThread().getStackTrace()); + } else { + estimatedPose = multiTagOnRioStrategy(cameraResult, cameraMatrix, distCoeffs); + } break; case MULTI_TAG_PNP_ON_COPROCESSOR: - estimatedPose = multiTagOnCoprocStrategy(cameraResult, cameraMatrix, distCoeffs); + estimatedPose = multiTagOnCoprocStrategy(cameraResult); break; default: DriverStation.reportError( @@ -381,10 +393,7 @@ private Optional update( return estimatedPose; } - private Optional multiTagOnCoprocStrategy( - PhotonPipelineResult result, - Optional> cameraMatrixOpt, - Optional> distCoeffsOpt) { + private Optional multiTagOnCoprocStrategy(PhotonPipelineResult result) { if (result.getMultiTagResult().estimatedPose.isPresent) { var best_tf = result.getMultiTagResult().estimatedPose.best; var best = @@ -399,7 +408,8 @@ private Optional multiTagOnCoprocStrategy( result.getTargets(), PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR)); } else { - return update(result, cameraMatrixOpt, distCoeffsOpt, this.multiTagFallbackStrategy); + // We can never fall back on another multi-tag strategy + return update(result, Optional.empty(), Optional.empty(), this.multiTagFallbackStrategy); } } diff --git a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp index 93d5cdf961..97bfbf6676 100644 --- a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp +++ b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp @@ -164,6 +164,8 @@ std::vector PhotonCamera::GetAllUnreadResults() { PhotonPipelineResult& result = ret[i]; packet >> result; + // TODO: NT4 timestamps are still not to be trusted. But it's the best we + // can do until we can make time sync more reliable.
result.SetRecieveTimestamp(units::microsecond_t(value.time) - result.GetLatency()); } From 52ff72605a1c85de1284d3bcf12ac5f681435b22 Mon Sep 17 00:00:00 2001 From: Matt Date: Sun, 4 Aug 2024 09:52:12 -0700 Subject: [PATCH 09/10] Add check --- .../main/native/cpp/photon/PhotonPoseEstimator.cpp | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp b/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp index 6da5edd5e5..9eb039557e 100644 --- a/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp +++ b/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp @@ -146,11 +146,16 @@ std::optional PhotonPoseEstimator::Update( ret = AverageBestTargetsStrategy(result); break; case MULTI_TAG_PNP_ON_COPROCESSOR: - ret = - MultiTagOnCoprocStrategy(result, cameraMatrixData, cameraDistCoeffs); + ret = MultiTagOnCoprocStrategy(result); break; case MULTI_TAG_PNP_ON_RIO: - ret = MultiTagOnRioStrategy(result, cameraMatrixData, cameraDistCoeffs); + if (cameraMatrixData && cameraDistCoeffs) { + ret = MultiTagOnRioStrategy(result, cameraMatrixData, cameraDistCoeffs); + } else { + FRC_ReportError(frc::warn::Warning, + "No camera calibration provided to multi-tag-on-rio!", + ""); + } break; default: FRC_ReportError(frc::warn::Warning, "Invalid Pose Strategy selected!", From 131d88a911ae4ab2576aed166bd2cdf1b6951b74 Mon Sep 17 00:00:00 2001 From: Matt Date: Sun, 4 Aug 2024 09:58:49 -0700 Subject: [PATCH 10/10] oops --- photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp | 4 +--- .../src/main/native/include/photon/PhotonPoseEstimator.h | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp b/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp index 9eb039557e..41e577eaa8 100644 --- a/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp +++ b/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp @@ -348,9 +348,7 @@ frc::Pose3d detail::ToPose3d(const cv::Mat& tvec, const cv::Mat& rvec) { } std::optional PhotonPoseEstimator::MultiTagOnCoprocStrategy( - PhotonPipelineResult result, - std::optional camMat, - std::optional distCoeffs) { + PhotonPipelineResult result) { if (result.MultiTagResult().result.isPresent) { const auto field2camera = result.MultiTagResult().result.best; diff --git a/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h b/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h index 7b5ab9eb0a..37e3822f3e 100644 --- a/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h +++ b/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h @@ -249,9 +249,7 @@ class PhotonPoseEstimator { * @return the estimated position of the robot in the FCS */ std::optional MultiTagOnCoprocStrategy( - PhotonPipelineResult result, - std::optional camMat, - std::optional distCoeffs); + PhotonPipelineResult result); /** * Return the pose calculation using all targets in view in the same PNP()
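
Taken together, this series replaces polling a single latest result with draining the NetworkTables result queue and feeding the pose estimator one result at a time, as shown in the Vision.java and Vision.h hunks above. The sketch below illustrates that consumption pattern for downstream robot code. It is a minimal example, not part of the patch: the class name, constructor parameters, and the addVisionMeasurement sink are placeholders, and it assumes the post-patch three-argument PhotonPoseEstimator constructor and the timestampSeconds field on EstimatedRobotPose.

import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Transform3d;
import java.util.Optional;
import java.util.function.BiConsumer;
import org.photonvision.EstimatedRobotPose;
import org.photonvision.PhotonCamera;
import org.photonvision.PhotonPoseEstimator;
import org.photonvision.PhotonPoseEstimator.PoseStrategy;

public class VisionConsumerSketch {
  private final PhotonCamera camera;
  private final PhotonPoseEstimator photonEstimator;
  // Sink for accepted measurements, e.g. a drivetrain pose estimator's addVisionMeasurement.
  private final BiConsumer<Pose2d, Double> addVisionMeasurement;

  public VisionConsumerSketch(
      String cameraName,
      AprilTagFieldLayout tagLayout,
      Transform3d robotToCam,
      BiConsumer<Pose2d, Double> addVisionMeasurement) {
    camera = new PhotonCamera(cameraName);
    // After this patch series the estimator no longer owns a PhotonCamera reference,
    // so it is constructed from the layout, strategy, and robot-to-camera transform only.
    photonEstimator =
        new PhotonPoseEstimator(tagLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, robotToCam);
    this.addVisionMeasurement = addVisionMeasurement;
  }

  /** Intended to be called from the robot's periodic loop. */
  public void periodic() {
    // Drain every result queued since the last call and run each through the pose estimator.
    for (var result : camera.getAllUnreadResults()) {
      Optional<EstimatedRobotPose> visionEst = photonEstimator.update(result);
      visionEst.ifPresent(
          est ->
              addVisionMeasurement.accept(est.estimatedPose.toPose2d(), est.timestampSeconds));
    }
  }
}

For example, a WPILib SwerveDrivePoseEstimator could be wired in as the sink with a method reference such as drivePoseEstimator::addVisionMeasurement, keeping the vision code decoupled from the drivetrain implementation.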