diff --git a/photon-core/src/main/java/org/photonvision/common/hardware/metrics/cmds/LinuxCmds.java b/photon-core/src/main/java/org/photonvision/common/hardware/metrics/cmds/LinuxCmds.java index d151b22780..1a7d18f35e 100644 --- a/photon-core/src/main/java/org/photonvision/common/hardware/metrics/cmds/LinuxCmds.java +++ b/photon-core/src/main/java/org/photonvision/common/hardware/metrics/cmds/LinuxCmds.java @@ -32,7 +32,7 @@ public void initCmds(HardwareConfig config) { cpuUptimeCommand = "uptime -p | cut -c 4-"; // RAM - ramUsageCommand = "awk '/MemFree:/ {print int($2 / 1000);}' /proc/meminfo"; + ramUsageCommand = "awk '/MemAvailable:/ {print int($2 / 1000);}' /proc/meminfo"; // Disk diskUsageCommand = "df ./ --output=pcent | tail -n +2"; diff --git a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/FilterContoursPipe.java b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/FilterContoursPipe.java index 336dfef7a6..1e7616c44f 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/FilterContoursPipe.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/FilterContoursPipe.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; - import org.opencv.core.RotatedRect; import org.photonvision.common.util.numbers.DoubleCouple; import org.photonvision.vision.frame.FrameStaticProperties; @@ -47,15 +46,6 @@ protected List process(List in) { private void rejectOutliers(List list, double xTol, double yTol) { if (list.size() < 2) return; // Must have at least 2 points to reject outliers -/* - // Sort by X and find median - list.sort(Comparator.comparingDouble(c -> c.getCenterPoint().x)); - - double medianX = list.get(list.size() / 2).getCenterPoint().x; - if (list.size() % 2 == 0) - medianX = (medianX + list.get(list.size() / 2 - 1).getCenterPoint().x) / 2; -*/ - double meanX = list.stream().mapToDouble(it -> it.getCenterPoint().x).sum() / list.size(); double stdDevX = @@ -63,15 +53,6 @@ private void rejectOutliers(List list, double xTol, double yTol) { stdDevX /= (list.size() - 1); stdDevX = Math.sqrt(stdDevX); -/* - // Sort by Y and find median - list.sort(Comparator.comparingDouble(c -> c.getCenterPoint().y)); - - double medianY = list.get(list.size() / 2).getCenterPoint().y; - if (list.size() % 2 == 0) - medianY = (medianY + list.get(list.size() / 2 - 1).getCenterPoint().y) / 2; -*/ - double meanY = list.stream().mapToDouble(it -> it.getCenterPoint().y).sum() / list.size(); double stdDevY = @@ -112,7 +93,8 @@ private void filterContour(Contour contour) { if (contourArea <= minFullness || contourArea >= maxFullness) return; // Aspect Ratio Filtering. 
- double aspectRatio = TargetCalculations.getAspectRatio(contour.getMinAreaRect(), params.isLandscape); + double aspectRatio = + TargetCalculations.getAspectRatio(contour.getMinAreaRect(), params.isLandscape); if (aspectRatio < params.getRatio().getFirst() || aspectRatio > params.getRatio().getSecond()) return; @@ -134,7 +116,8 @@ public FilterContoursParams( DoubleCouple extent, FrameStaticProperties camProperties, double xTol, - double yTol, boolean isLandscape) { + double yTol, + boolean isLandscape) { this.m_area = area; this.m_ratio = ratio; this.m_fullness = extent; diff --git a/photon-lib/src/main/java/org/photonvision/estimation/OpenCVHelp.java b/photon-lib/src/main/java/org/photonvision/estimation/OpenCVHelp.java index 98dbb5b2e3..e2395db6e5 100644 --- a/photon-lib/src/main/java/org/photonvision/estimation/OpenCVHelp.java +++ b/photon-lib/src/main/java/org/photonvision/estimation/OpenCVHelp.java @@ -28,7 +28,6 @@ import edu.wpi.first.math.Nat; import edu.wpi.first.math.Num; import edu.wpi.first.math.Vector; -import edu.wpi.first.math.geometry.CoordinateSystem; import edu.wpi.first.math.geometry.Pose3d; import edu.wpi.first.math.geometry.Rotation3d; import edu.wpi.first.math.geometry.Transform3d; @@ -57,6 +56,9 @@ import org.photonvision.targeting.TargetCorner; public final class OpenCVHelp { + private static RotTrlTransform3d NWU_TO_EDN; + private static RotTrlTransform3d EDN_TO_NWU; + static { try { var loader = @@ -66,15 +68,21 @@ public final class OpenCVHelp { } catch (Exception e) { throw new RuntimeException("Failed to load native libraries!", e); } + + NWU_TO_EDN = + new RotTrlTransform3d( + new Rotation3d(Matrix.mat(Nat.N3(), Nat.N3()).fill(0, -1, 0, 0, 0, -1, 1, 0, 0)), + new Translation3d()); + EDN_TO_NWU = + new RotTrlTransform3d( + new Rotation3d(Matrix.mat(Nat.N3(), Nat.N3()).fill(0, 0, 1, -1, 0, 0, 0, -1, 0)), + new Translation3d()); } - public static MatOfDouble matrixToMat(SimpleMatrix matrix) { + public static Mat matrixToMat(SimpleMatrix matrix) { var mat = new Mat(matrix.numRows(), matrix.numCols(), CvType.CV_64F); mat.put(0, 0, matrix.getDDRM().getData()); - var wrappedMat = new MatOfDouble(); - mat.convertTo(wrappedMat, CvType.CV_64F); - mat.release(); - return wrappedMat; + return mat; } public static Matrix matToMatrix(Mat mat) { @@ -94,8 +102,7 @@ public static Matrix matToMatrix(Mat mat) { public static MatOfPoint3f translationToTvec(Translation3d... 
translations) { Point3[] points = new Point3[translations.length]; for (int i = 0; i < translations.length; i++) { - var trl = - CoordinateSystem.convert(translations[i], CoordinateSystem.NWU(), CoordinateSystem.EDN()); + var trl = translationNWUtoEDN(translations[i]); points[i] = new Point3(trl.getX(), trl.getY(), trl.getZ()); } return new MatOfPoint3f(points); @@ -113,10 +120,7 @@ public static Translation3d tvecToTranslation(Mat tvecInput) { tvecInput.convertTo(wrapped, CvType.CV_32F); wrapped.get(0, 0, data); wrapped.release(); - return CoordinateSystem.convert( - new Translation3d(data[0], data[1], data[2]), - CoordinateSystem.EDN(), - CoordinateSystem.NWU()); + return translationEDNtoNWU(new Translation3d(data[0], data[1], data[2])); } /** @@ -151,42 +155,46 @@ public static Rotation3d rvecToRotation(Mat rvecInput) { return rotationEDNtoNWU(new Rotation3d(axis.div(axis.norm()), axis.norm())); } - public static TargetCorner averageCorner(List corners) { - if (corners == null || corners.size() == 0) return null; - - var pointMat = targetCornersToMat(corners); + public static Point avgPoint(Point[] points) { + if (points == null || points.length == 0) return null; + var pointMat = new MatOfPoint2f(points); Core.reduce(pointMat, pointMat, 0, Core.REDUCE_AVG); - var avgPt = matToTargetCorners(pointMat)[0]; + var avgPt = pointMat.toArray()[0]; pointMat.release(); return avgPt; } - public static MatOfPoint2f targetCornersToMat(List corners) { - return targetCornersToMat(corners.toArray(TargetCorner[]::new)); + public static Point[] cornersToPoints(List corners) { + var points = new Point[corners.size()]; + for (int i = 0; i < corners.size(); i++) { + var corn = corners.get(i); + points[i] = new Point(corn.x, corn.y); + } + return points; } - public static MatOfPoint2f targetCornersToMat(TargetCorner... corners) { + public static Point[] cornersToPoints(TargetCorner... corners) { var points = new Point[corners.length]; for (int i = 0; i < corners.length; i++) { points[i] = new Point(corners[i].x, corners[i].y); } - return new MatOfPoint2f(points); + return points; } - public static TargetCorner[] pointsToTargetCorners(Point... points) { - var corners = new TargetCorner[points.length]; + public static List pointsToCorners(Point... points) { + var corners = new ArrayList(points.length); for (int i = 0; i < points.length; i++) { - corners[i] = new TargetCorner(points[i].x, points[i].y); + corners.add(new TargetCorner(points[i].x, points[i].y)); } return corners; } - public static TargetCorner[] matToTargetCorners(MatOfPoint2f matInput) { - var corners = new TargetCorner[(int) matInput.total()]; + public static List pointsToCorners(MatOfPoint2f matInput) { + var corners = new ArrayList(); float[] data = new float[(int) matInput.total() * matInput.channels()]; matInput.get(0, 0, data); - for (int i = 0; i < corners.length; i++) { - corners[i] = new TargetCorner(data[0 + 2 * i], data[1 + 2 * i]); + for (int i = 0; i < (int) matInput.total(); i++) { + corners.add(new TargetCorner(data[0 + 2 * i], data[1 + 2 * i])); } return corners; } @@ -222,24 +230,39 @@ public static List reorderCircular(List elements, boolean backwards, i return reordered; } + // TODO: RotTrlTransform3d removal awaiting Rotation3d performance improvements /** - * Convert a rotation from EDN to NWU. For example, if you have a rotation X,Y,Z {1, 0, 0} in EDN, - * this would be XYZ {0, -1, 0} in NWU. + * Convert a rotation delta from EDN to NWU. 
For example, if you have a rotation X,Y,Z {1, 0, 0} + * in EDN, this would be {0, -1, 0} in NWU. */ private static Rotation3d rotationEDNtoNWU(Rotation3d rot) { - return CoordinateSystem.convert( - new Rotation3d(), CoordinateSystem.NWU(), CoordinateSystem.EDN()) - .plus(CoordinateSystem.convert(rot, CoordinateSystem.EDN(), CoordinateSystem.NWU())); + return new RotTrlTransform3d(EDN_TO_NWU.apply(rot), new Translation3d()) + .apply(EDN_TO_NWU.inverse().getRotation()); } /** - * Convert a rotation from EDN to NWU. For example, if you have a rotation X,Y,Z {1, 0, 0} in EDN, - * this would be XYZ {0, -1, 0} in NWU. + * Convert a rotation delta from NWU to EDN. For example, if you have a rotation X,Y,Z {1, 0, 0} + * in NWU, this would be {0, 0, 1} in EDN. */ private static Rotation3d rotationNWUtoEDN(Rotation3d rot) { - return CoordinateSystem.convert( - new Rotation3d(), CoordinateSystem.EDN(), CoordinateSystem.NWU()) - .plus(CoordinateSystem.convert(rot, CoordinateSystem.NWU(), CoordinateSystem.EDN())); + return new RotTrlTransform3d(NWU_TO_EDN.apply(rot), new Translation3d()) + .apply(NWU_TO_EDN.inverse().getRotation()); + } + + /** + * Convert a translation from EDN to NWU. For example, if you have a translation X,Y,Z {1, 0, 0} + * in EDN, this would be {0, -1, 0} in NWU. + */ + private static Translation3d translationEDNtoNWU(Translation3d trl) { + return EDN_TO_NWU.apply(trl); + } + + /** + * Convert a translation from NWU to EDN. For example, if you have a translation X,Y,Z {1, 0, 0} + * in NWU, this would be {0, 0, 1} in EDN. + */ + private static Translation3d translationNWUtoEDN(Translation3d trl) { + return NWU_TO_EDN.apply(trl); } /** @@ -248,29 +271,27 @@ private static Rotation3d rotationNWUtoEDN(Rotation3d rot) { * * @param cameraMatrix the camera intrinsics matrix in standard opencv form * @param distCoeffs the camera distortion matrix in standard opencv form - * @param camPose The current camera pose in the 3d world + * @param camRt The change in basis from world coordinates to camera coordinates. See {@link + * RotTrlTransform3d#makeRelativeTo(Pose3d)}. 
* @param objectTranslations The 3d points to be projected - * @return The 2d points in pixels which correspond to the image of the 3d points on the camera + * @return The 2d points in pixels which correspond to the camera's image of the 3d points */ - public static List projectPoints( + public static Point[] projectPoints( Matrix cameraMatrix, Matrix distCoeffs, - Pose3d camPose, + RotTrlTransform3d camRt, List objectTranslations) { // translate to opencv classes var objectPoints = translationToTvec(objectTranslations.toArray(new Translation3d[0])); // opencv rvec/tvec describe a change in basis from world to camera - var basisChange = RotTrlTransform3d.makeRelativeTo(camPose); - var rvec = rotationToRvec(basisChange.getRotation()); - var tvec = translationToTvec(basisChange.getTranslation()); + var rvec = rotationToRvec(camRt.getRotation()); + var tvec = translationToTvec(camRt.getTranslation()); var cameraMatrixMat = matrixToMat(cameraMatrix.getStorage()); - var distCoeffsMat = matrixToMat(distCoeffs.getStorage()); + var distCoeffsMat = new MatOfDouble(matrixToMat(distCoeffs.getStorage())); var imagePoints = new MatOfPoint2f(); // project to 2d Calib3d.projectPoints(objectPoints, rvec, tvec, cameraMatrixMat, distCoeffsMat, imagePoints); - - // turn 2d point Mat into TargetCorners - var corners = matToTargetCorners(imagePoints); + var points = imagePoints.toArray(); // release our Mats from native memory objectPoints.release(); @@ -280,37 +301,37 @@ public static List projectPoints( distCoeffsMat.release(); imagePoints.release(); - return Arrays.asList(corners); + return points; } /** * Undistort 2d image points using a given camera's intrinsics and distortion. * - *
<p>2d image points from projectPoints(CameraProperties, Pose3d, List) projectPoints will - * naturally be distorted, so this operation is important if the image points need to be directly - * used (e.g. 2d yaw/pitch). + * <p>
2d image points from {@link #projectPoints(Matrix, Matrix, RotTrlTransform3d, List) + * projectPoints()} will naturally be distorted, so this operation is important if the image + * points need to be directly used (e.g. 2d yaw/pitch). * - * @param cameraMatrix the camera intrinsics matrix in standard opencv form - * @param distCoeffs the camera distortion matrix in standard opencv form - * @param corners The distorted image points + * @param cameraMatrix The camera intrinsics matrix in standard opencv form + * @param distCoeffs The camera distortion matrix in standard opencv form + * @param points The distorted image points * @return The undistorted image points */ - public static List undistortPoints( - Matrix cameraMatrix, Matrix distCoeffs, List corners) { - var points_in = targetCornersToMat(corners); - var points_out = new MatOfPoint2f(); + public static Point[] undistortPoints( + Matrix cameraMatrix, Matrix distCoeffs, Point[] points) { + var distMat = new MatOfPoint2f(points); + var undistMat = new MatOfPoint2f(); var cameraMatrixMat = matrixToMat(cameraMatrix.getStorage()); var distCoeffsMat = matrixToMat(distCoeffs.getStorage()); - Calib3d.undistortImagePoints(points_in, points_out, cameraMatrixMat, distCoeffsMat); - var corners_out = matToTargetCorners(points_out); + Calib3d.undistortImagePoints(distMat, undistMat, cameraMatrixMat, distCoeffsMat); + var undistPoints = undistMat.toArray(); - points_in.release(); - points_out.release(); + distMat.release(); + undistMat.release(); cameraMatrixMat.release(); distCoeffsMat.release(); - return Arrays.asList(corners_out); + return undistPoints; } /** @@ -319,58 +340,52 @@ public static List undistortPoints( *
<p>
Note that rectangle size and position are stored with ints and do not have sub-pixel * accuracy. * - * @param corners The corners/points to be bounded - * @return Rectangle bounding the given corners + * @param points The points to be bounded + * @return Rectangle bounding the given points */ - public static Rect getBoundingRect(List corners) { - var corn = targetCornersToMat(corners); - var rect = Imgproc.boundingRect(corn); - corn.release(); + public static Rect getBoundingRect(Point[] points) { + var pointMat = new MatOfPoint2f(points); + var rect = Imgproc.boundingRect(pointMat); + pointMat.release(); return rect; } /** * Gets the rotated rectangle with minimum area which bounds this contour. * - *
<p>Note that rectangle size and position are stored with doubles and have sub-pixel accuracy. + * <p>
Note that rectangle size and position are stored with floats and have sub-pixel accuracy. * - * @param corners The corners/points to be bounded - * @return Rotated rectangle bounding the given corners + * @param points The points to be bounded + * @return Rotated rectangle bounding the given points */ - public static RotatedRect getMinAreaRect(List corners) { - var corn = targetCornersToMat(corners); - var rect = Imgproc.minAreaRect(corn); - corn.release(); + public static RotatedRect getMinAreaRect(Point[] points) { + var pointMat = new MatOfPoint2f(points); + var rect = Imgproc.minAreaRect(pointMat); + pointMat.release(); return rect; } /** - * Get the area in pixels of this target's contour. It's important to note that this may be - * different from the area of the bounding rectangle around the contour. + * Gets the convex hull contour (the outline) of a list of points. * - * @param corners The corners defining this contour - * @return Area in pixels (units of corner x/y) + * @param points The input contour + * @return The subset of points defining the convex hull. Note that these use ints and not floats. */ - public static double getContourAreaPx(List corners) { - var temp = targetCornersToMat(corners); - var corn = new MatOfPoint(temp.toArray()); - temp.release(); - + public static Point[] getConvexHull(Point[] points) { + var pointMat = new MatOfPoint(points); // outputHull gives us indices (of corn) that make a convex hull contour var outputHull = new MatOfInt(); - Imgproc.convexHull(corn, outputHull); + + Imgproc.convexHull(pointMat, outputHull); + int[] indices = outputHull.toArray(); outputHull.release(); - var tempPoints = corn.toArray(); - var points = tempPoints.clone(); + pointMat.release(); + var convexPoints = new Point[indices.length]; for (int i = 0; i < indices.length; i++) { - points[i] = tempPoints[indices[i]]; + convexPoints[i] = points[indices[i]]; } - corn.fromArray(points); - // calculate area of the (convex hull) contour - double area = Imgproc.contourArea(corn); - corn.release(); - return area; + return convexPoints; } /** @@ -397,8 +412,8 @@ public static double getContourAreaPx(List corners) { *
  • Point 3: [0, -squareLength / 2, -squareLength / 2] * * - * @param imageCorners The projection of these 3d object points into the 2d camera image. The - * order should match the given object point translations. + * @param imagePoints The projection of these 3d object points into the 2d camera image. The order + * should match the given object point translations. * @return The resulting transformation that maps the camera pose to the target pose and the * ambiguity if an alternate solution is available. */ @@ -406,10 +421,10 @@ public static PNPResults solvePNP_SQUARE( Matrix cameraMatrix, Matrix distCoeffs, List modelTrls, - List imageCorners) { + Point[] imagePoints) { // solvepnp inputs - MatOfPoint3f objectPoints = new MatOfPoint3f(); - MatOfPoint2f imagePoints = new MatOfPoint2f(); + MatOfPoint3f objectMat = new MatOfPoint3f(); + MatOfPoint2f imageMat = new MatOfPoint2f(); MatOfDouble cameraMatrixMat = new MatOfDouble(); MatOfDouble distCoeffsMat = new MatOfDouble(); var rvecs = new ArrayList(); @@ -420,10 +435,10 @@ public static PNPResults solvePNP_SQUARE( try { // IPPE_SQUARE expects our corners in a specific order modelTrls = reorderCircular(modelTrls, true, -1); - imageCorners = reorderCircular(imageCorners, true, -1); + imagePoints = reorderCircular(Arrays.asList(imagePoints), true, -1).toArray(Point[]::new); // translate to opencv classes - translationToTvec(modelTrls.toArray(new Translation3d[0])).assignTo(objectPoints); - targetCornersToMat(imageCorners).assignTo(imagePoints); + translationToTvec(modelTrls.toArray(new Translation3d[0])).assignTo(objectMat); + imageMat.fromArray(imagePoints); matrixToMat(cameraMatrix.getStorage()).assignTo(cameraMatrixMat); matrixToMat(distCoeffs.getStorage()).assignTo(distCoeffsMat); @@ -434,8 +449,8 @@ public static PNPResults solvePNP_SQUARE( for (int tries = 0; tries < 2; tries++) { // calc rvecs/tvecs and associated reprojection error from image points Calib3d.solvePnPGeneric( - objectPoints, - imagePoints, + objectMat, + imageMat, cameraMatrixMat, distCoeffsMat, rvecs, @@ -457,10 +472,10 @@ public static PNPResults solvePNP_SQUARE( // check if we got a NaN result if (!Double.isNaN(errors[0])) break; else { // add noise and retry - double[] br = imagePoints.get(0, 0); + double[] br = imageMat.get(0, 0); br[0] -= 0.001; br[1] -= 0.001; - imagePoints.put(0, 0, br); + imageMat.put(0, 0, br); } } @@ -478,8 +493,8 @@ public static PNPResults solvePNP_SQUARE( return new PNPResults(); } finally { // release our Mats from native memory - objectPoints.release(); - imagePoints.release(); + objectMat.release(); + imageMat.release(); cameraMatrixMat.release(); distCoeffsMat.release(); for (var v : rvecs) v.release(); @@ -504,8 +519,8 @@ public static PNPResults solvePNP_SQUARE( * @param cameraMatrix The camera intrinsics matrix in standard opencv form * @param distCoeffs The camera distortion matrix in standard opencv form * @param objectTrls The translations of the object corners, relative to the field. - * @param imageCorners The projection of these 3d object points into the 2d camera image. The - * order should match the given object point translations. + * @param imagePoints The projection of these 3d object points into the 2d camera image. The order + * should match the given object point translations. * @return The resulting transformation that maps the camera pose to the target pose. If the 3d * model points are supplied relative to the origin, this transformation brings the camera to * the origin. 
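[Editor's note: a minimal usage sketch of the solvePNP_SQUARE helper refactored above; this is not part of the diff. The intrinsics, distortion, and corner coordinates are made-up placeholders, and the class name is hypothetical. PNPResults fields (isPresent, best, ambiguity) are used as elsewhere in this PR.]

    import edu.wpi.first.math.Matrix;
    import edu.wpi.first.math.Nat;
    import edu.wpi.first.math.VecBuilder;
    import org.opencv.core.Point;
    import org.photonvision.estimation.OpenCVHelp;
    import org.photonvision.estimation.TargetModel;

    class SolvePnpSquareSketch {
        public static void main(String[] args) {
            // Assumed 640x480 pinhole intrinsics: centered principal point, no distortion.
            var cameraMatrix = Matrix.mat(Nat.N3(), Nat.N3()).fill(500, 0, 320, 0, 500, 240, 0, 0, 1);
            var distCoeffs = VecBuilder.fill(0, 0, 0, 0, 0);
            // Four detected corners of one tag, ordered to match the model vertices.
            Point[] imagePoints = {
                new Point(300, 260), new Point(340, 260), new Point(340, 220), new Point(300, 220)
            };
            // Estimate the camera-to-tag transform from the 2d corners.
            var pnp = OpenCVHelp.solvePNP_SQUARE(
                cameraMatrix, distCoeffs, TargetModel.kTag16h5.vertices, imagePoints);
            if (pnp.isPresent) System.out.println(pnp.best + " (ambiguity " + pnp.ambiguity + ")");
        }
    }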
@@ -514,13 +529,13 @@ public static PNPResults solvePNP_SQPNP( Matrix cameraMatrix, Matrix distCoeffs, List objectTrls, - List imageCorners) { + Point[] imagePoints) { try { // translate to opencv classes - MatOfPoint3f objectPoints = translationToTvec(objectTrls.toArray(new Translation3d[0])); - MatOfPoint2f imagePoints = targetCornersToMat(imageCorners); - MatOfDouble cameraMatrixMat = matrixToMat(cameraMatrix.getStorage()); - MatOfDouble distCoeffsMat = matrixToMat(distCoeffs.getStorage()); + MatOfPoint3f objectMat = translationToTvec(objectTrls.toArray(new Translation3d[0])); + MatOfPoint2f imageMat = new MatOfPoint2f(imagePoints); + Mat cameraMatrixMat = matrixToMat(cameraMatrix.getStorage()); + Mat distCoeffsMat = matrixToMat(distCoeffs.getStorage()); var rvecs = new ArrayList(); var tvecs = new ArrayList(); Mat rvec = Mat.zeros(3, 1, CvType.CV_32F); @@ -528,8 +543,8 @@ public static PNPResults solvePNP_SQPNP( Mat reprojectionError = new Mat(); // calc rvec/tvec from image points Calib3d.solvePnPGeneric( - objectPoints, - imagePoints, + objectMat, + imageMat, cameraMatrixMat, distCoeffsMat, rvecs, @@ -546,8 +561,8 @@ public static PNPResults solvePNP_SQPNP( var best = new Transform3d(tvecToTranslation(tvecs.get(0)), rvecToRotation(rvecs.get(0))); // release our Mats from native memory - objectPoints.release(); - imagePoints.release(); + objectMat.release(); + imageMat.release(); cameraMatrixMat.release(); distCoeffsMat.release(); for (var v : rvecs) v.release(); diff --git a/photon-lib/src/main/java/org/photonvision/estimation/RotTrlTransform3d.java b/photon-lib/src/main/java/org/photonvision/estimation/RotTrlTransform3d.java index 41c3e36d97..0fac0334eb 100644 --- a/photon-lib/src/main/java/org/photonvision/estimation/RotTrlTransform3d.java +++ b/photon-lib/src/main/java/org/photonvision/estimation/RotTrlTransform3d.java @@ -25,6 +25,7 @@ package org.photonvision.estimation; import edu.wpi.first.math.geometry.Pose3d; +import edu.wpi.first.math.geometry.Quaternion; import edu.wpi.first.math.geometry.Rotation3d; import edu.wpi.first.math.geometry.Transform3d; import edu.wpi.first.math.geometry.Translation3d; @@ -37,16 +38,49 @@ public class RotTrlTransform3d { private final Translation3d trl; private final Rotation3d rot; + // TODO: removal awaiting wpilib Rotation3d performance improvements + private double m_w; + private double m_x; + private double m_y; + private double m_z; - public RotTrlTransform3d() { - this(new Rotation3d(), new Translation3d()); + /** + * A rotation-translation transformation. + * + *
<p>
    Applying this RotTrlTransform3d to poses will preserve their current origin-to-pose + * transform as if the origin was transformed by these components instead. + * + * @param rot The rotation component + * @param trl The translation component + */ + public RotTrlTransform3d(Rotation3d rot, Translation3d trl) { + this.rot = rot; + var quat = rot.getQuaternion(); + m_w = quat.getW(); + m_x = quat.getX(); + m_y = quat.getY(); + m_z = quat.getZ(); + this.trl = trl; + } + + public RotTrlTransform3d(Pose3d initial, Pose3d last) { + // this.rot = last.getRotation().minus(initial.getRotation()); + // this.trl = last.getTranslation().minus(initial.getTranslation().rotateBy(rot)); + + var quat = initial.getRotation().getQuaternion(); + m_w = quat.getW(); + m_x = quat.getX(); + m_y = quat.getY(); + m_z = quat.getZ(); + this.rot = invrotate(last.getRotation()); + this.trl = last.getTranslation().minus(rotate(initial.getTranslation())); } /** * Creates a rotation-translation transformation from a Transform3d. * - *
<p>Applying this transformation to poses will preserve their current origin-to-pose transform - * as if the origin was transformed by these components. + * <p>
    Applying this RotTrlTransform3d to poses will preserve their current origin-to-pose + * transform as if the origin was transformed by trf instead. * * @param trf The origin transformation */ @@ -54,18 +88,40 @@ public RotTrlTransform3d(Transform3d trf) { this(trf.getRotation(), trf.getTranslation()); } - /** - * A rotation-translation transformation. - * - *
<p>
    Applying this transformation to poses will preserve their current origin-to-pose transform - * as if the origin was transformed by these components. - * - * @param rot The rotation component - * @param trl The translation component - */ - public RotTrlTransform3d(Rotation3d rot, Translation3d trl) { - this.rot = rot; - this.trl = trl; + public RotTrlTransform3d() { + this(new Rotation3d(), new Translation3d()); + } + + private Translation3d rotate(Translation3d otrl) { + final var p = new Quaternion(0.0, otrl.getX(), otrl.getY(), otrl.getZ()); + final var qprime = times(times(p), new Quaternion(m_w, -m_x, -m_y, -m_z)); + return new Translation3d(qprime.getX(), qprime.getY(), qprime.getZ()); + } + + private Translation3d invrotate(Translation3d otrl) { + m_x = -m_x; + m_y = -m_y; + m_z = -m_z; + var result = rotate(otrl); + m_x = -m_x; + m_y = -m_y; + m_z = -m_z; + return result; + } + + private Rotation3d rotate(Rotation3d orot) { + return new Rotation3d(times(orot.getQuaternion())); + } + + private Rotation3d invrotate(Rotation3d orot) { + m_x = -m_x; + m_y = -m_y; + m_z = -m_z; + var result = rotate(orot); + m_x = -m_x; + m_y = -m_y; + m_z = -m_z; + return result; } /** @@ -80,9 +136,12 @@ public static RotTrlTransform3d makeRelativeTo(Pose3d pose) { /** The inverse of this transformation. Applying the inverse will "undo" this transformation. */ public RotTrlTransform3d inverse() { - var inverseRot = rot.unaryMinus(); - var inverseTrl = trl.rotateBy(inverseRot).unaryMinus(); - return new RotTrlTransform3d(inverseRot, inverseTrl); + // var inverseRot = rot.unaryMinus(); + // var inverseTrl = trl.rotateBy(inverseRot).unaryMinus(); + // return new RotTrlTransform3d(inverseRot, inverseTrl); + + var inverseTrl = invrotate(trl).unaryMinus(); + return new RotTrlTransform3d(new Rotation3d(new Quaternion(m_w, -m_x, -m_y, -m_z)), inverseTrl); } /** This transformation as a Transform3d (as if of the origin) */ @@ -101,19 +160,74 @@ public Rotation3d getRotation() { } public Translation3d apply(Translation3d trl) { - return apply(new Pose3d(trl, new Rotation3d())).getTranslation(); + // return trl.rotateBy(rot).plus(this.trl); + return rotate(trl).plus(this.trl); } - ; public List applyTrls(List trls) { return trls.stream().map(this::apply).collect(Collectors.toList()); } + public Rotation3d apply(Rotation3d rot) { + return rotate(rot); + } + + public List applyRots(List rots) { + return rots.stream().map(this::apply).collect(Collectors.toList()); + } + public Pose3d apply(Pose3d pose) { - return new Pose3d(pose.getTranslation().rotateBy(rot).plus(trl), pose.getRotation().plus(rot)); + // return new Pose3d(pose.getTranslation().rotateBy(rot).plus(trl), + // pose.getRotation().plus(rot)); + return new Pose3d(apply(pose.getTranslation()), apply(pose.getRotation())); } public List applyPoses(List poses) { return poses.stream().map(this::apply).collect(Collectors.toList()); } + + // TODO: removal awaiting wpilib Rotation3d performance improvements + private Quaternion times(Quaternion other) { + final double o_w = other.getW(); + final double o_x = other.getX(); + final double o_y = other.getY(); + final double o_z = other.getZ(); + return times(m_w, m_x, m_y, m_z, o_w, o_x, o_y, o_z); + } + + private static Quaternion times(Quaternion a, Quaternion b) { + final double m_w = a.getW(); + final double m_x = a.getX(); + final double m_y = a.getY(); + final double m_z = a.getZ(); + final double o_w = b.getW(); + final double o_x = b.getX(); + final double o_y = b.getY(); + final double o_z = b.getZ(); + 
return times(m_w, m_x, m_y, m_z, o_w, o_x, o_y, o_z); + } + + private static Quaternion times( + double m_w, + double m_x, + double m_y, + double m_z, + double o_w, + double o_x, + double o_y, + double o_z) { + // https://en.wikipedia.org/wiki/Quaternion#Scalar_and_vector_parts + + // v₁ x v₂ + final double cross_x = m_y * o_z - o_y * m_z; + final double cross_y = o_x * m_z - m_x * o_z; + final double cross_z = m_x * o_y - o_x * m_y; + + // v = w₁v₂ + w₂v₁ + v₁ x v₂ + final double new_x = o_x * m_w + (m_x * o_w) + cross_x; + final double new_y = o_y * m_w + (m_y * o_w) + cross_y; + final double new_z = o_z * m_w + (m_z * o_w) + cross_z; + + return new Quaternion(m_w * o_w - (m_x * o_x + m_y * o_y + m_z * o_z), new_x, new_y, new_z); + } } diff --git a/photon-lib/src/main/java/org/photonvision/estimation/TargetModel.java b/photon-lib/src/main/java/org/photonvision/estimation/TargetModel.java index 52693afb55..90044abb38 100644 --- a/photon-lib/src/main/java/org/photonvision/estimation/TargetModel.java +++ b/photon-lib/src/main/java/org/photonvision/estimation/TargetModel.java @@ -25,8 +25,8 @@ package org.photonvision.estimation; import edu.wpi.first.math.geometry.Pose3d; +import edu.wpi.first.math.geometry.Rotation2d; import edu.wpi.first.math.geometry.Rotation3d; -import edu.wpi.first.math.geometry.Transform3d; import edu.wpi.first.math.geometry.Translation3d; import edu.wpi.first.math.util.Units; import java.util.ArrayList; @@ -36,8 +36,8 @@ /** Describes the 3d model of a target. */ public class TargetModel { /** - * Translations of this target's vertices relative to its pose. If this target is spherical, this - * list has one translation with x == radius. + * Translations of this target's vertices relative to its pose. Rectangular and spherical targets + * will have four vertices. See their respective constructors for more info. */ public final List vertices; @@ -47,7 +47,17 @@ public class TargetModel { public static final TargetModel kTag16h5 = new TargetModel(Units.inchesToMeters(6), Units.inchesToMeters(6)); - /** Creates a rectangular, planar target model given the width and height. */ + /** + * Creates a rectangular, planar target model given the width and height. The model has four + * vertices: + * + *
<ul> + *   <li>Point 0: [0, -width/2, -height/2] + *   <li>Point 1: [0, width/2, -height/2] + *   <li>Point 2: [0, width/2, height/2] + *   <li>Point 3: [0, -width/2, height/2] + * </ul>
    + */ public TargetModel(double widthMeters, double heightMeters) { this.vertices = List.of( @@ -61,18 +71,64 @@ public TargetModel(double widthMeters, double heightMeters) { } /** - * Creates a spherical target model which has similar dimensions when viewed from any angle. This - * model will only have one vertex which has x == radius. + * Creates a cuboid target model given the length, width, height. The model has eight vertices: + * + *
<ul> + *   <li>Point 0: [length/2, -width/2, -height/2] + *   <li>Point 1: [length/2, width/2, -height/2] + *   <li>Point 2: [length/2, width/2, height/2] + *   <li>Point 3: [length/2, -width/2, height/2] + *   <li>Point 4: [-length/2, -width/2, height/2] + *   <li>Point 5: [-length/2, width/2, height/2] + *   <li>Point 6: [-length/2, width/2, -height/2] + *   <li>Point 7: [-length/2, -width/2, -height/2] + * </ul>
    + */ + public TargetModel(double lengthMeters, double widthMeters, double heightMeters) { + this( + List.of( + new Translation3d(lengthMeters / 2.0, -widthMeters / 2.0, -heightMeters / 2.0), + new Translation3d(lengthMeters / 2.0, widthMeters / 2.0, -heightMeters / 2.0), + new Translation3d(lengthMeters / 2.0, widthMeters / 2.0, heightMeters / 2.0), + new Translation3d(lengthMeters / 2.0, -widthMeters / 2.0, heightMeters / 2.0), + new Translation3d(-lengthMeters / 2.0, -widthMeters / 2.0, heightMeters / 2.0), + new Translation3d(-lengthMeters / 2.0, widthMeters / 2.0, heightMeters / 2.0), + new Translation3d(-lengthMeters / 2.0, widthMeters / 2.0, -heightMeters / 2.0), + new Translation3d(-lengthMeters / 2.0, -widthMeters / 2.0, -heightMeters / 2.0))); + } + + /** + * Creates a spherical target model which has similar dimensions regardless of its rotation. This + * model has four vertices: + * + *
<ul> + *   <li>Point 0: [0, -radius, 0] + *   <li>Point 1: [0, 0, -radius] + *   <li>Point 2: [0, radius, 0] + *   <li>Point 3: [0, 0, radius] + * </ul>
    + * + * Q: Why these vertices? A: This target should be oriented to the camera every frame, much + * like a sprite/decal, and these vertices represent the ellipse vertices (maxima). These vertices + * are used for drawing the image of this sphere, but do not match the corners that will be + * published by photonvision. */ public TargetModel(double diameterMeters) { - this.vertices = List.of(new Translation3d(diameterMeters / 2.0, 0, 0)); + double radius = diameterMeters / 2.0; + this.vertices = + List.of( + new Translation3d(0, -radius, 0), + new Translation3d(0, 0, -radius), + new Translation3d(0, radius, 0), + new Translation3d(0, 0, radius)); this.isPlanar = false; this.isSpherical = true; } /** * Creates a target model from arbitrary 3d vertices. Automatically determines if the given - * vertices are planar(x == 0). More than 2 vertices must be given. + * vertices are planar(x == 0). More than 2 vertices must be given. If this is a planar model, the + * vertices should define a non-intersecting contour. * * @param vertices Translations representing the vertices of this target model relative to its * pose. @@ -95,13 +151,31 @@ public TargetModel(List vertices) { /** * This target's vertices offset from its field pose. * - *
<p>Note: If this target is spherical, only one vertex radius meters in front of the pose is - * returned. + * <p>
    Note: If this target is spherical, use {@link #getOrientedPose(Translation3d, + * Translation3d)} with this method. */ public List getFieldVertices(Pose3d targetPose) { - return vertices.stream() - .map(t -> targetPose.plus(new Transform3d(t, new Rotation3d())).getTranslation()) - .collect(Collectors.toList()); + var basisChange = new RotTrlTransform3d(targetPose.getRotation(), targetPose.getTranslation()); + return vertices.stream().map(t -> basisChange.apply(t)).collect(Collectors.toList()); + } + + /** + * Returns a Pose3d with the given target translation oriented (with its relative x-axis aligned) + * to the camera translation. This is used for spherical targets which should not have their + * projection change regardless of their own rotation. + * + * @param tgtTrl This target's translation + * @param cameraTrl Camera's translation + * @return This target's pose oriented to the camera + */ + public static Pose3d getOrientedPose(Translation3d tgtTrl, Translation3d cameraTrl) { + var relCam = cameraTrl.minus(tgtTrl); + var orientToCam = + new Rotation3d( + 0, + new Rotation2d(Math.hypot(relCam.getX(), relCam.getY()), -relCam.getZ()).getRadians(), + new Rotation2d(relCam.getX(), relCam.getY()).getRadians()); + return new Pose3d(tgtTrl, orientToCam); } @Override diff --git a/photon-lib/src/main/java/org/photonvision/estimation/VisionEstimation.java b/photon-lib/src/main/java/org/photonvision/estimation/VisionEstimation.java index caeda4fcda..c9400b2a4c 100644 --- a/photon-lib/src/main/java/org/photonvision/estimation/VisionEstimation.java +++ b/photon-lib/src/main/java/org/photonvision/estimation/VisionEstimation.java @@ -35,6 +35,7 @@ import java.util.List; import java.util.Objects; import java.util.stream.Collectors; +import org.opencv.core.Point; import org.photonvision.targeting.PNPResults; import org.photonvision.targeting.PhotonTrackedTarget; import org.photonvision.targeting.TargetCorner; @@ -67,7 +68,7 @@ public static List getVisibleLayoutTags( * * @param cameraMatrix The camera intrinsics matrix in standard opencv form * @param distCoeffs The camera distortion matrix in standard opencv form - * @param visTags The visible tags reported by PV + * @param visTags The visible tags reported by PV. Non-tag targets are automatically excluded. * @param tagLayout The known tag layout on the field * @return The transformation that maps the field origin to the camera pose. Ensure the {@link * PNPResults} are present before utilizing them. 
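[Editor's note: a short consumer sketch for the estimateCamPosePNP method documented above (its body changes follow below); this is not part of the diff. The method, PNPResults fields, and generic bounds come from this PR; the class and method names below are hypothetical.]

    import edu.wpi.first.apriltag.AprilTagFieldLayout;
    import edu.wpi.first.math.Matrix;
    import edu.wpi.first.math.geometry.Pose3d;
    import edu.wpi.first.math.numbers.N1;
    import edu.wpi.first.math.numbers.N3;
    import edu.wpi.first.math.numbers.N5;
    import java.util.List;
    import org.photonvision.estimation.VisionEstimation;
    import org.photonvision.targeting.PhotonTrackedTarget;

    class CamPosePnpSketch {
        // Returns the estimated field-to-camera pose, or null when no tags resolved.
        static Pose3d estimateFieldToCamera(
                Matrix<N3, N3> cameraMatrix,
                Matrix<N5, N1> distCoeffs,
                List<PhotonTrackedTarget> visibleTags,
                AprilTagFieldLayout tagLayout) {
            var pnp =
                VisionEstimation.estimateCamPosePNP(cameraMatrix, distCoeffs, visibleTags, tagLayout);
            if (!pnp.isPresent) return null;
            // pnp.best maps the field origin to the camera pose.
            return new Pose3d().plus(pnp.best);
        }
    }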
@@ -85,17 +86,28 @@ public static PNPResults estimateCamPosePNP( } var corners = new ArrayList(); - for (var tag : visTags) corners.addAll(tag.getDetectedCorners()); - var knownTags = getVisibleLayoutTags(visTags, tagLayout); + var knownTags = new ArrayList(); + // ensure these are AprilTags in our layout + for (var tgt : visTags) { + int id = tgt.getFiducialId(); + tagLayout + .getTagPose(id) + .ifPresent( + pose -> { + knownTags.add(new AprilTag(id, pose)); + corners.addAll(tgt.getDetectedCorners()); + }); + } if (knownTags.size() == 0 || corners.size() == 0 || corners.size() % 4 != 0) { return new PNPResults(); } + Point[] points = OpenCVHelp.cornersToPoints(corners); // single-tag pnp if (visTags.size() == 1) { var camToTag = OpenCVHelp.solvePNP_SQUARE( - cameraMatrix, distCoeffs, TargetModel.kTag16h5.vertices, corners); + cameraMatrix, distCoeffs, TargetModel.kTag16h5.vertices, points); if (!camToTag.isPresent) return new PNPResults(); var bestPose = knownTags.get(0).pose.transformBy(camToTag.best.inverse()); var altPose = new Pose3d(); @@ -114,7 +126,7 @@ public static PNPResults estimateCamPosePNP( else { var objectTrls = new ArrayList(); for (var tag : knownTags) objectTrls.addAll(TargetModel.kTag16h5.getFieldVertices(tag.pose)); - var camToOrigin = OpenCVHelp.solvePNP_SQPNP(cameraMatrix, distCoeffs, objectTrls, corners); + var camToOrigin = OpenCVHelp.solvePNP_SQPNP(cameraMatrix, distCoeffs, objectTrls, points); if (!camToOrigin.isPresent) return new PNPResults(); return new PNPResults( camToOrigin.best.inverse(), diff --git a/photon-lib/src/main/java/org/photonvision/simulation/PhotonCameraSim.java b/photon-lib/src/main/java/org/photonvision/simulation/PhotonCameraSim.java index 0c1fd875eb..1c9ef2ce1e 100644 --- a/photon-lib/src/main/java/org/photonvision/simulation/PhotonCameraSim.java +++ b/photon-lib/src/main/java/org/photonvision/simulation/PhotonCameraSim.java @@ -31,6 +31,7 @@ import edu.wpi.first.math.MathUtil; import edu.wpi.first.math.Pair; import edu.wpi.first.math.geometry.Pose3d; +import edu.wpi.first.math.geometry.Rotation2d; import edu.wpi.first.util.RuntimeLoader; import edu.wpi.first.util.WPIUtilJNI; import java.util.ArrayList; @@ -40,18 +41,20 @@ import org.opencv.core.CvType; import org.opencv.core.Mat; import org.opencv.core.Point; +import org.opencv.core.RotatedRect; +import org.opencv.core.Scalar; import org.opencv.core.Size; import org.opencv.imgproc.Imgproc; import org.photonvision.PhotonCamera; import org.photonvision.PhotonTargetSortMode; import org.photonvision.common.dataflow.structures.Packet; import org.photonvision.common.networktables.NTTopicSet; -import org.photonvision.estimation.CameraTargetRelation; import org.photonvision.estimation.OpenCVHelp; +import org.photonvision.estimation.RotTrlTransform3d; +import org.photonvision.estimation.TargetModel; import org.photonvision.targeting.PNPResults; import org.photonvision.targeting.PhotonPipelineResult; import org.photonvision.targeting.PhotonTrackedTarget; -import org.photonvision.targeting.TargetCorner; /** * A handle for simulating {@link PhotonCamera} values. 
Processing simulated targets through this @@ -78,6 +81,8 @@ public class PhotonCameraSim implements AutoCloseable { private final CvSource videoSimRaw; private final Mat videoSimFrameRaw = new Mat(); private boolean videoSimRawEnabled = true; + private boolean videoSimWireframeEnabled = false; + private double videoSimWireframeResolution = 0.1; private final CvSource videoSimProcessed; private final Mat videoSimFrameProcessed = new Mat(); private boolean videoSimProcEnabled = true; @@ -200,28 +205,35 @@ public Mat getVideoSimFrameRaw() { * @return If this vision target can be seen before image projection. */ public boolean canSeeTargetPose(Pose3d camPose, VisionTargetSim target) { - var rel = new CameraTargetRelation(camPose, target.getPose()); + // var rel = new CameraTargetRelation(camPose, target.getPose()); + // TODO: removal awaiting wpilib Rotation3d performance improvements + var relTarget = RotTrlTransform3d.makeRelativeTo(camPose).apply(target.getPose()); + var camToTargYaw = new Rotation2d(relTarget.getX(), relTarget.getY()); + var camToTargPitch = + new Rotation2d(Math.hypot(relTarget.getX(), relTarget.getY()), -relTarget.getZ()); + var relCam = RotTrlTransform3d.makeRelativeTo(target.getPose()).apply(camPose); + var targToCamAngle = new Rotation2d(relCam.getX(), Math.hypot(relCam.getY(), relCam.getZ())); + return ( // target translation is outside of camera's FOV - (Math.abs(rel.camToTargYaw.getDegrees()) < prop.getHorizFOV().getDegrees() / 2) - && (Math.abs(rel.camToTargPitch.getDegrees()) < prop.getVertFOV().getDegrees() / 2) + (Math.abs(camToTargYaw.getDegrees()) < prop.getHorizFOV().getDegrees() / 2) + && (Math.abs(camToTargPitch.getDegrees()) < prop.getVertFOV().getDegrees() / 2) && (!target.getModel().isPlanar - || Math.abs(rel.targToCamAngle.getDegrees()) + || Math.abs(targToCamAngle.getDegrees()) < 90) // camera is behind planar target and it should be occluded - && (rel.camToTarg.getTranslation().getNorm() <= maxSightRangeMeters)); // target is too far + && (relTarget.getTranslation().getNorm() <= maxSightRangeMeters)); // target is too far } /** - * Determines if all target corners are inside the camera's image. + * Determines if all target points are inside the camera's image. * - * @param corners The corners of the target as image points(x,y) + * @param points The target's 2d image points */ - public boolean canSeeCorners(List corners) { - // corner is outside of resolution - for (var corner : corners) { - if (MathUtil.clamp(corner.x, 0, prop.getResWidth()) != corner.x - || MathUtil.clamp(corner.y, 0, prop.getResHeight()) != corner.y) { - return false; + public boolean canSeeCorners(Point[] points) { + for (var point : points) { + if (MathUtil.clamp(point.x, 0, prop.getResWidth()) != point.x + || MathUtil.clamp(point.y, 0, prop.getResHeight()) != point.y) { + return false; // point is outside of resolution } } return true; @@ -289,11 +301,35 @@ public void setTargetSortMode(PhotonTargetSortMode sortMode) { if (sortMode != null) this.sortMode = sortMode; } - /** Sets whether the raw video stream simulation is enabled. */ + /** + * Sets whether the raw video stream simulation is enabled. + * + *
<p>
    Note: This may increase loop times. + */ public void enableRawStream(boolean enabled) { videoSimRawEnabled = enabled; } + /** + * Sets whether a wireframe of the field is drawn to the raw video stream. + * + *
<p>
    Note: This will dramatically increase loop times. + */ + public void enableDrawWireframe(boolean enabled) { + videoSimWireframeEnabled = enabled; + } + + /** + * Sets the resolution of the drawn wireframe if enabled. Drawn line segments will be subdivided + * into smaller segments based on a threshold set by the resolution. + * + * @param resolution Resolution as a fraction(0 - 1) of the video frame's diagonal length in + * pixels + */ + public void setWireframeResolution(double resolution) { + videoSimWireframeResolution = resolution; + } + /** Sets whether the processed video stream simulation is enabled. */ public void enableProcessedStream(boolean enabled) { videoSimProcEnabled = enabled; @@ -310,10 +346,12 @@ public PhotonPipelineResult process( if (dist1 == dist2) return 0; return dist1 < dist2 ? 1 : -1; }); - // all targets visible (in FOV) - var visibleTags = new ArrayList>>(); - // all targets actually detectable to the camera + // all targets visible before noise + var visibleTgts = new ArrayList>(); + // all targets actually detected by camera (after noise) var detectableTgts = new ArrayList(); + // basis change from world coordinates to camera coordinates + var camRt = RotTrlTransform3d.makeRelativeTo(cameraPose); // reset our frame VideoSimUtil.updateVideoProp(videoSimRaw, prop); @@ -326,22 +364,64 @@ public PhotonPipelineResult process( if (!canSeeTargetPose(cameraPose, tgt)) continue; // find target's 3d corner points - // TODO: Handle spherical targets var fieldCorners = tgt.getFieldVertices(); - + if (tgt.getModel().isSpherical) { // target is spherical + var model = tgt.getModel(); + // orient the model to the camera (like a sprite/decal) so it appears similar regardless of + // view + fieldCorners = + model.getFieldVertices( + TargetModel.getOrientedPose( + tgt.getPose().getTranslation(), cameraPose.getTranslation())); + } // project 3d target points into 2d image points - var targetCorners = - OpenCVHelp.projectPoints( - prop.getIntrinsics(), prop.getDistCoeffs(), cameraPose, fieldCorners); - // save visible tags for stream simulation - if (tgt.fiducialID >= 0) { - visibleTags.add(new Pair<>(tgt.fiducialID, targetCorners)); + var imagePoints = + OpenCVHelp.projectPoints(prop.getIntrinsics(), prop.getDistCoeffs(), camRt, fieldCorners); + // spherical targets need a rotated rectangle of their midpoints for visualization + if (tgt.getModel().isSpherical) { + var center = OpenCVHelp.avgPoint(imagePoints); + int l = 0, t, b, r = 0; + // reference point (left side midpoint) + for (int i = 1; i < 4; i++) { + if (imagePoints[i].x < imagePoints[l].x) l = i; + } + var lc = imagePoints[l]; + // determine top, right, bottom midpoints + double[] angles = new double[4]; + t = (l + 1) % 4; + b = (l + 1) % 4; + for (int i = 0; i < 4; i++) { + if (i == l) continue; + var ic = imagePoints[i]; + angles[i] = Math.atan2(lc.y - ic.y, ic.x - lc.x); + if (angles[i] >= angles[t]) t = i; + if (angles[i] <= angles[b]) b = i; + } + for (int i = 0; i < 4; i++) { + if (i != t && i != l && i != b) r = i; + } + // create RotatedRect from midpoints + var rect = + new RotatedRect( + new Point(center.x, center.y), + new Size(imagePoints[r].x - lc.x, imagePoints[b].y - imagePoints[t].y), + Math.toDegrees(-angles[r])); + // set target corners to rect corners + Point[] points = new Point[4]; + rect.points(points); + imagePoints = points; } + // save visible targets for raw video stream simulation + visibleTgts.add(new Pair<>(tgt, imagePoints)); // estimate pixel noise - var noisyTargetCorners = 
prop.estPixelNoise(targetCorners); + var noisyTargetCorners = prop.estPixelNoise(imagePoints); + // find the minimum area rectangle of target corners + var minAreaRect = OpenCVHelp.getMinAreaRect(noisyTargetCorners); + Point[] minAreaRectPts = new Point[4]; + minAreaRect.points(minAreaRectPts); // find the (naive) 2d yaw/pitch - var centerPt = OpenCVHelp.getMinAreaRect(noisyTargetCorners).center; - var centerRot = prop.getPixelRot(new TargetCorner(centerPt.x, centerPt.y)); + var centerPt = minAreaRect.center; + var centerRot = prop.getPixelRot(centerPt); // find contour area double areaPercent = prop.getContourAreaPercent(noisyTargetCorners); @@ -356,20 +436,8 @@ public PhotonPipelineResult process( prop.getDistCoeffs(), tgt.getModel().vertices, noisyTargetCorners); - if (!pnpSim.isPresent) continue; - centerRot = - prop.getPixelRot( - OpenCVHelp.projectPoints( - prop.getIntrinsics(), - prop.getDistCoeffs(), - new Pose3d(), - List.of(pnpSim.best.getTranslation())) - .get(0)); } - Point[] minAreaRectPts = new Point[noisyTargetCorners.size()]; - OpenCVHelp.getMinAreaRect(noisyTargetCorners).points(minAreaRectPts); - detectableTgts.add( new PhotonTrackedTarget( Math.toDegrees(centerRot.getZ()), @@ -380,25 +448,76 @@ public PhotonPipelineResult process( pnpSim.best, pnpSim.alt, pnpSim.ambiguity, - List.of(OpenCVHelp.pointsToTargetCorners(minAreaRectPts)), - noisyTargetCorners)); + OpenCVHelp.pointsToCorners(minAreaRectPts), + OpenCVHelp.pointsToCorners(noisyTargetCorners))); } // render visible tags to raw video frame if (videoSimRawEnabled) { - for (var tag : visibleTags) { - VideoSimUtil.warp16h5TagImage( - tag.getFirst(), OpenCVHelp.targetCornersToMat(tag.getSecond()), videoSimFrameRaw, true); + // draw field wireframe + if (videoSimWireframeEnabled) { + VideoSimUtil.drawFieldWireframe( + camRt, + prop, + videoSimWireframeResolution, + 1.5, + new Scalar(80), + 6, + 1, + new Scalar(30), + videoSimFrameRaw); + } + + // draw targets + for (var pair : visibleTgts) { + var tgt = pair.getFirst(); + var corn = pair.getSecond(); + + if (tgt.fiducialID >= 0) { // apriltags + VideoSimUtil.warp16h5TagImage(tgt.fiducialID, corn, true, videoSimFrameRaw); + } else if (!tgt.getModel().isSpherical) { // non-spherical targets + var contour = corn; + if (!tgt.getModel() + .isPlanar) { // visualization cant handle non-convex projections of 3d models + contour = OpenCVHelp.getConvexHull(contour); + } + VideoSimUtil.drawPoly(contour, -1, new Scalar(255), true, videoSimFrameRaw); + } else { // spherical targets + VideoSimUtil.drawInscribedEllipse(corn, new Scalar(255), videoSimFrameRaw); + } } videoSimRaw.putFrame(videoSimFrameRaw); } else videoSimRaw.setConnectionStrategy(ConnectionStrategy.kForceClose); - // draw/annotate tag detection outline on processed view + // draw/annotate target detection outline on processed view if (videoSimProcEnabled) { Imgproc.cvtColor(videoSimFrameRaw, videoSimFrameProcessed, Imgproc.COLOR_GRAY2BGR); + Imgproc.drawMarker( // crosshair + videoSimFrameProcessed, + new Point(prop.getResWidth() / 2.0, prop.getResHeight() / 2.0), + new Scalar(0, 255, 0), + Imgproc.MARKER_CROSS, + (int) VideoSimUtil.getScaledThickness(15, videoSimFrameProcessed), + (int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed), + Imgproc.LINE_AA); for (var tgt : detectableTgts) { - if (tgt.getFiducialId() >= 0) { + if (tgt.getFiducialId() >= 0) { // apriltags VideoSimUtil.drawTagDetection( tgt.getFiducialId(), - OpenCVHelp.targetCornersToMat(tgt.getDetectedCorners()), + 
OpenCVHelp.cornersToPoints(tgt.getDetectedCorners()), + videoSimFrameProcessed); + } else { // other targets + // bounding rectangle + Imgproc.rectangle( + videoSimFrameProcessed, + OpenCVHelp.getBoundingRect(OpenCVHelp.cornersToPoints(tgt.getDetectedCorners())), + new Scalar(0, 0, 255), + (int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed), + Imgproc.LINE_AA); + + VideoSimUtil.drawPoly( + OpenCVHelp.cornersToPoints(tgt.getMinAreaRectCorners()), + (int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed), + new Scalar(255, 30, 30), + true, videoSimFrameProcessed); } } diff --git a/photon-lib/src/main/java/org/photonvision/simulation/SimCameraProperties.java b/photon-lib/src/main/java/org/photonvision/simulation/SimCameraProperties.java index 336c7d5029..35b0ad190f 100644 --- a/photon-lib/src/main/java/org/photonvision/simulation/SimCameraProperties.java +++ b/photon-lib/src/main/java/org/photonvision/simulation/SimCameraProperties.java @@ -25,21 +25,30 @@ package org.photonvision.simulation; import com.fasterxml.jackson.databind.ObjectMapper; +import edu.wpi.first.math.MathUtil; import edu.wpi.first.math.Matrix; import edu.wpi.first.math.Nat; +import edu.wpi.first.math.Pair; import edu.wpi.first.math.VecBuilder; import edu.wpi.first.math.Vector; +import edu.wpi.first.math.geometry.Pose3d; import edu.wpi.first.math.geometry.Rotation2d; import edu.wpi.first.math.geometry.Rotation3d; +import edu.wpi.first.math.geometry.Translation3d; import edu.wpi.first.math.numbers.*; import edu.wpi.first.wpilibj.DriverStation; import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.List; import java.util.Random; -import java.util.stream.Collectors; +import org.ejml.data.DMatrix3; +import org.ejml.dense.fixed.CommonOps_DDF3; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.Point; +import org.opencv.imgproc.Imgproc; import org.photonvision.estimation.OpenCVHelp; -import org.photonvision.targeting.TargetCorner; +import org.photonvision.estimation.RotTrlTransform3d; /** * Calibration and performance values for this camera. @@ -69,6 +78,8 @@ public class SimCameraProperties { private double exposureTimeMs = 0; private double avgLatencyMs = 0; private double latencyStdDevMs = 0; + // util + private List viewplanes = new ArrayList<>(); /** Default constructor which is the same as {@link #PERFECT_90DEG} */ public SimCameraProperties() { @@ -151,25 +162,22 @@ public void setRandomSeed(long seed) { } public void setCalibration(int resWidth, int resHeight, Rotation2d fovDiag) { - double s = Math.sqrt(resWidth * resWidth + resHeight * resHeight); - var fovWidth = new Rotation2d(fovDiag.getRadians() * (resWidth / s)); - var fovHeight = new Rotation2d(fovDiag.getRadians() * (resHeight / s)); - - double maxFovDeg = Math.max(fovWidth.getDegrees(), fovHeight.getDegrees()); - if (maxFovDeg > 179) { - double scale = 179.0 / maxFovDeg; - fovWidth = new Rotation2d(fovWidth.getRadians() * scale); - fovHeight = new Rotation2d(fovHeight.getRadians() * scale); + if (fovDiag.getDegrees() < 1 || fovDiag.getDegrees() > 179) { + fovDiag = Rotation2d.fromDegrees(MathUtil.clamp(fovDiag.getDegrees(), 1, 179)); DriverStation.reportError( - "Requested FOV width/height too large! Scaling below 180 degrees...", false); + "Requested invalid FOV! 
Clamping between (1, 179) degrees...", false); } + double resDiag = Math.sqrt(resWidth * resWidth + resHeight * resHeight); + double diagRatio = Math.tan(fovDiag.getRadians() / 2); + var fovWidth = new Rotation2d(Math.atan(diagRatio * (resWidth / resDiag)) * 2); + var fovHeight = new Rotation2d(Math.atan(diagRatio * (resHeight / resDiag)) * 2); // assume no distortion var distCoeff = VecBuilder.fill(0, 0, 0, 0, 0); // assume centered principal point (pixels) - double cx = resWidth / 2.0; - double cy = resHeight / 2.0; + double cx = resWidth / 2.0 - 0.5; + double cy = resHeight / 2.0 - 0.5; // use given fov to determine focal point (pixels) double fx = cx / Math.tan(fovWidth.getRadians() / 2.0); @@ -186,14 +194,30 @@ public void setCalibration( this.resHeight = resHeight; this.camIntrinsics = camIntrinsics; this.distCoeffs = distCoeffs; - } - - public void setCameraIntrinsics(Matrix camIntrinsics) { - this.camIntrinsics = camIntrinsics; - } - public void setDistortionCoeffs(Matrix distCoeffs) { - this.distCoeffs = distCoeffs; + // left, right, up, and down view planes + var p = + new Translation3d[] { + new Translation3d( + 1, + new Rotation3d(0, 0, getPixelYaw(0).plus(new Rotation2d(-Math.PI / 2)).getRadians())), + new Translation3d( + 1, + new Rotation3d( + 0, 0, getPixelYaw(resWidth).plus(new Rotation2d(Math.PI / 2)).getRadians())), + new Translation3d( + 1, + new Rotation3d( + 0, getPixelPitch(0).plus(new Rotation2d(Math.PI / 2)).getRadians(), 0)), + new Translation3d( + 1, + new Rotation3d( + 0, getPixelPitch(resHeight).plus(new Rotation2d(-Math.PI / 2)).getRadians(), 0)) + }; + viewplanes.clear(); + for (int i = 0; i < p.length; i++) { + viewplanes.add(new DMatrix3(p[i].getX(), p[i].getY(), p[i].getZ())); + } } public void setCalibError(double avgErrorPx, double errorStdDevPx) { @@ -245,6 +269,11 @@ public int getResArea() { return resWidth * resHeight; } + /** Width:height */ + public double getAspectRatio() { + return (double) resWidth / resHeight; + } + public Matrix getIntrinsics() { return camIntrinsics.copy(); } @@ -284,18 +313,16 @@ public SimCameraProperties copy() { return newProp; } - public List undistort(List points) { - return OpenCVHelp.undistortPoints(camIntrinsics, distCoeffs, points); - } - /** * The percentage(0 - 100) of this camera's resolution the contour takes up in pixels of the * image. * - * @param corners Corners of the contour + * @param points Points of the contour */ - public double getContourAreaPercent(List corners) { - return OpenCVHelp.getContourAreaPx(corners) / getResArea() * 100; + public double getContourAreaPercent(Point[] points) { + return Imgproc.contourArea(new MatOfPoint2f(OpenCVHelp.getConvexHull(points))) + / getResArea() + * 100; } /** The yaw from the principal point of this camera to the pixel x value. Positive values left. */ @@ -311,7 +338,7 @@ public Rotation2d getPixelYaw(double pixelX) { * The pitch from the principal point of this camera to the pixel y value. Pitch is positive down. * *
<p>
    Note that this angle is naively computed and may be incorrect. See {@link - * #getCorrectedPixelRot(TargetCorner)}. + * #getCorrectedPixelRot(Point)}. */ public Rotation2d getPixelPitch(double pixelY) { double fy = camIntrinsics.get(1, 1); @@ -326,9 +353,9 @@ public Rotation2d getPixelPitch(double pixelY) { * down. * *
<p>
    Note that pitch is naively computed and may be incorrect. See {@link - * #getCorrectedPixelRot(TargetCorner)}. + * #getCorrectedPixelRot(Point)}. */ - public Rotation3d getPixelRot(TargetCorner point) { + public Rotation3d getPixelRot(Point point) { return new Rotation3d( 0, getPixelPitch(point.y).getRadians(), getPixelYaw(point.x).getRadians()); } @@ -354,7 +381,7 @@ public Rotation3d getPixelRot(TargetCorner point) { * @return Rotation3d with yaw and pitch of the line projected out of the camera from the given * pixel (roll is zero). */ - public Rotation3d getCorrectedPixelRot(TargetCorner point) { + public Rotation3d getCorrectedPixelRot(Point point) { double fx = camIntrinsics.get(0, 0); double cx = camIntrinsics.get(0, 2); double xOffset = cx - point.x; @@ -389,47 +416,158 @@ public Rotation2d getDiagFOV() { return new Rotation2d(Math.hypot(getHorizFOV().getRadians(), getVertFOV().getRadians())); } - /** Width:height */ - public double getAspectRatio() { - return (double) resWidth / resHeight; - } - /** - * Returns these pixel points as fractions of a 1x1 square image. This means the camera's aspect - * ratio and resolution will be used, and the points' x and y may not reach all portions(e.g. a - * wide aspect ratio means some of the top and bottom of the square image is unreachable). + * Determines where the line segment defined by the two given translations intersects the camera's + * frustum/field-of-vision, if at all. * - * @param points Pixel points on this camera's image - * @return Points mapped to an image of 1x1 resolution + *
+ * <p>The line is parametrized so any of its points p = t * (b - a) + a. This method
+ * returns these values of t, minimum first, defining the region of the line segment which is
+ * visible in the frustum. If both ends of the line segment are visible, this simply returns {0,
+ * 1}. If, for example, point b is visible while a is not, and half of the line segment is inside
+ * the camera frustum, {0.5, 1} would be returned.
+ *
+ * @param camRt The change in basis from world coordinates to camera coordinates. See {@link
+ *     RotTrlTransform3d#makeRelativeTo(Pose3d)}.
+ * @param a The initial translation of the line
+ * @param b The final translation of the line
+ * @return A Pair of Doubles. The values may be null:
+ *     <ul>
+ *       <li>{Double, Double} : Two parametrized values(t), minimum first, representing which
+ *           segment of the line is visible in the camera frustum.
+ *       <li>{Double, null} : One value(t) representing a single intersection point. For example,
+ *           the line only intersects the intersection of two adjacent viewplanes.
+ *       <li>{null, null} : No values. The line segment is not visible in the camera frustum.
+ *     </ul>
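+ *
+ *     <p>For example, a minimal sketch of culling a partially visible segment with the returned
+ *     values, assuming both are non-null and with camRt, a, and b as described above; any point
+ *     on the line is p = a + t * (b - a):
+ *     <pre>{@code
+ *     var t = getVisibleLine(camRt, a, b);
+ *     var visibleStart = a.plus(b.minus(a).times(t.getFirst()));
+ *     var visibleEnd = a.plus(b.minus(a).times(t.getSecond()));
+ *     }</pre>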
    */ - public List getPixelFraction(List points) { - double resLarge = getAspectRatio() > 1 ? resWidth : resHeight; + public Pair getVisibleLine( + RotTrlTransform3d camRt, Translation3d a, Translation3d b) { + // translations relative to the camera + var rela = camRt.apply(a); + var relb = camRt.apply(b); + + // check if both ends are behind camera + if (rela.getX() <= 0 && relb.getX() <= 0) return new Pair<>(null, null); + + var av = new DMatrix3(rela.getX(), rela.getY(), rela.getZ()); + var bv = new DMatrix3(relb.getX(), relb.getY(), relb.getZ()); + // a to b + var abv = new DMatrix3(); + CommonOps_DDF3.subtract(bv, av, abv); + + // check if the ends of the line segment are visible + boolean aVisible = true; + boolean bVisible = true; + for (int i = 0; i < viewplanes.size(); i++) { + var normal = viewplanes.get(i); + double aVisibility = CommonOps_DDF3.dot(av, normal); + if (aVisibility < 0) aVisible = false; + double bVisibility = CommonOps_DDF3.dot(bv, normal); + if (bVisibility < 0) bVisible = false; + // both ends are outside at least one of the same viewplane + if (aVisibility <= 0 && bVisibility <= 0) return new Pair<>(null, null); + } + // both ends are inside frustum + if (aVisible && bVisible) return new Pair<>(Double.valueOf(0), Double.valueOf(1)); + + // parametrized (t=0 at a, t=1 at b) intersections with viewplanes + double[] intersections = {Double.NaN, Double.NaN, Double.NaN, Double.NaN}; + // intersection points + List ipts = new ArrayList<>(); + for (double val : intersections) ipts.add(null); + + // find intersections + for (int i = 0; i < viewplanes.size(); i++) { + var normal = viewplanes.get(i); + + // we want to know the value of t when the line intercepts this plane + // parametrized: v = t * ab + a, where v lies on the plane + // we can find the projection of a onto the plane normal + // a_projn = normal.times(av.dot(normal) / normal.dot(normal)); + var a_projn = new DMatrix3(); + CommonOps_DDF3.scale( + CommonOps_DDF3.dot(av, normal) / CommonOps_DDF3.dot(normal, normal), normal, a_projn); + // this projection lets us determine the scalar multiple t of ab where + // (t * ab + a) is a vector which lies on the plane + if (Math.abs(CommonOps_DDF3.dot(abv, a_projn)) < 1e-5) continue; // line is parallel to plane + intersections[i] = CommonOps_DDF3.dot(a_projn, a_projn) / -CommonOps_DDF3.dot(abv, a_projn); + + // vector a to the viewplane + var apv = new DMatrix3(); + CommonOps_DDF3.scale(intersections[i], abv, apv); + // av + apv = intersection point + var intersectpt = new DMatrix3(); + CommonOps_DDF3.add(av, apv, intersectpt); + ipts.set(i, intersectpt); + + // discard intersections outside the camera frustum + for (int j = 1; j < viewplanes.size(); j++) { + int oi = (i + j) % viewplanes.size(); + var onormal = viewplanes.get(oi); + // if the dot of the intersection point with any plane normal is negative, it is outside + if (CommonOps_DDF3.dot(intersectpt, onormal) < 0) { + intersections[i] = Double.NaN; + ipts.set(i, null); + break; + } + } + + // discard duplicate intersections + if (ipts.get(i) == null) continue; + for (int j = i - 1; j >= 0; j--) { + var oipt = ipts.get(j); + if (oipt == null) continue; + var diff = new DMatrix3(); + CommonOps_DDF3.subtract(oipt, intersectpt, diff); + if (CommonOps_DDF3.elementMaxAbs(diff) < 1e-4) { + intersections[i] = Double.NaN; + ipts.set(i, null); + break; + } + } + } + + // determine visible segment (minimum and maximum t) + double inter1 = Double.NaN; + double inter2 = Double.NaN; + for (double inter : intersections) 
{ + if (!Double.isNaN(inter)) { + if (Double.isNaN(inter1)) inter1 = inter; + else inter2 = inter; + } + } - return points.stream() - .map( - p -> { - // offset to account for aspect ratio - return new TargetCorner( - (p.x + (resLarge - resWidth) / 2.0) / resLarge, - (p.y + (resLarge - resHeight) / 2.0) / resLarge); - }) - .collect(Collectors.toList()); + // two viewplane intersections + if (!Double.isNaN(inter2)) { + double max = Math.max(inter1, inter2); + double min = Math.min(inter1, inter2); + if (aVisible) min = 0; + if (bVisible) max = 1; + return new Pair<>(min, max); + } + // one viewplane intersection + else if (!Double.isNaN(inter1)) { + if (aVisible) return new Pair<>(Double.valueOf(0), inter1); + if (bVisible) return new Pair<>(inter1, Double.valueOf(1)); + return new Pair<>(inter1, null); + } + // no intersections + else return new Pair<>(null, null); } /** Returns these points after applying this camera's estimated noise. */ - public List estPixelNoise(List points) { + public Point[] estPixelNoise(Point[] points) { if (avgErrorPx == 0 && errorStdDevPx == 0) return points; - return points.stream() - .map( - p -> { - // error pixels in random direction - double error = avgErrorPx + rand.nextGaussian() * errorStdDevPx; - double errorAngle = rand.nextDouble() * 2 * Math.PI - Math.PI; - return new TargetCorner( - p.x + error * Math.cos(errorAngle), p.y + error * Math.sin(errorAngle)); - }) - .collect(Collectors.toList()); + Point[] noisyPts = new Point[points.length]; + for (int i = 0; i < points.length; i++) { + var p = points[i]; + // error pixels in random direction + double error = avgErrorPx + rand.nextGaussian() * errorStdDevPx; + double errorAngle = rand.nextDouble() * 2 * Math.PI - Math.PI; + noisyPts[i] = + new Point(p.x + error * Math.cos(errorAngle), p.y + error * Math.sin(errorAngle)); + } + return noisyPts; } /** diff --git a/photon-lib/src/main/java/org/photonvision/simulation/VideoSimUtil.java b/photon-lib/src/main/java/org/photonvision/simulation/VideoSimUtil.java index d142347a9a..7a32e413a3 100644 --- a/photon-lib/src/main/java/org/photonvision/simulation/VideoSimUtil.java +++ b/photon-lib/src/main/java/org/photonvision/simulation/VideoSimUtil.java @@ -25,9 +25,13 @@ package org.photonvision.simulation; import edu.wpi.first.cscore.CvSource; +import edu.wpi.first.math.geometry.Pose3d; +import edu.wpi.first.math.geometry.Translation3d; +import edu.wpi.first.math.util.Units; import edu.wpi.first.util.RuntimeLoader; import java.awt.image.BufferedImage; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -45,6 +49,7 @@ import org.opencv.imgcodecs.Imgcodecs; import org.opencv.imgproc.Imgproc; import org.photonvision.estimation.OpenCVHelp; +import org.photonvision.estimation.RotTrlTransform3d; public class VideoSimUtil { public static final String kLocalTagImagesPath = "./src/main/resources/images/apriltags/"; @@ -57,6 +62,10 @@ public class VideoSimUtil { // Points corresponding to marker(black square) corners of 8x8 16h5 tag images public static final Point[] kTag16h5MarkerPts; + // field dimensions for wireframe + private static double fieldLength = 16.54175; + private static double fieldWidth = 8.0137; + static { try { var loader = @@ -166,23 +175,23 @@ public static Point[] get16h5MarkerPts(int scale) { * @param tagId The id of the specific tag to warp onto the destination image * @param dstPoints Points(4) in destination image where the tag marker(black square) corners * should be 
warped onto.
- * @param destination The destination image to place the warped tag image onto.
* @param antialiasing If antialiasing should be performed by automatically
* supersampling/interpolating the warped image. This should be used if better stream quality
* is desired or target detection is being done on the stream, but can hurt performance.
- * @see OpenCVHelp#targetCornersToMat(org.photonvision.targeting.TargetCorner...)
+ * @param destination The destination image to place the warped tag image onto.
*/
public static void warp16h5TagImage(
- int tagId, MatOfPoint2f dstPoints, Mat destination, boolean antialiasing) {
+ int tagId, Point[] dstPoints, boolean antialiasing, Mat destination) {
Mat tagImage = kTag16h5Images.get(tagId);
if (tagImage == null || tagImage.empty()) return;
var tagPoints = new MatOfPoint2f(kTag16h5MarkerPts);
// points of tag image corners
var tagImageCorners = new MatOfPoint2f(getImageCorners(tagImage.size()));
+ var dstPointMat = new MatOfPoint2f(dstPoints);
// the rectangle describing the region-of-interest (ROI)
- var boundingRect = Imgproc.boundingRect(dstPoints);
+ var boundingRect = Imgproc.boundingRect(dstPointMat);
// find the perspective transform from the tag image to the warped destination points
- Mat perspecTrf = Imgproc.getPerspectiveTransform(tagPoints, dstPoints);
+ Mat perspecTrf = Imgproc.getPerspectiveTransform(tagPoints, dstPointMat);
// check extreme image corners after transform to check if we need to expand bounding rect
var extremeCorners = new MatOfPoint2f();
Core.perspectiveTransform(tagImageCorners, extremeCorners, perspecTrf);
@@ -238,12 +247,12 @@ public static void warp16h5TagImage(
// upscale if supersampling
Mat scaledDstPts = new Mat();
if (supersampling > 1) {
- Core.multiply(dstPoints, new Scalar(supersampling, supersampling), scaledDstPts);
+ Core.multiply(dstPointMat, new Scalar(supersampling, supersampling), scaledDstPts);
boundingRect.x *= supersampling;
boundingRect.y *= supersampling;
boundingRect.width *= supersampling;
boundingRect.height *= supersampling;
- } else dstPoints.assignTo(scaledDstPts);
+ } else dstPointMat.assignTo(scaledDstPts);
// update transform relative to expanded, scaled bounding rect
Core.subtract(scaledDstPts, new Scalar(boundingRect.tl().x, boundingRect.tl().y), scaledDstPts);
@@ -291,6 +300,58 @@ public static void warp16h5TagImage(
tempROI.copyTo(destination.submat(boundingRect), tempMask);
}
+
+ /**
+ * Given a line thickness in a 640x480 image, try to scale to the given destination image
+ * resolution.
+ *
+ * @param thickness480p A hypothetical line thickness in a 640x480 image
+ * @param destinationImg The destination image to scale to
+ * @return Scaled thickness which cannot be less than 1
+ */
+ public static double getScaledThickness(double thickness480p, Mat destinationImg) {
+ double scaleX = destinationImg.width() / 640.0;
+ double scaleY = destinationImg.height() / 480.0;
+ double minScale = Math.min(scaleX, scaleY);
+ return Math.max(thickness480p * minScale, 1.0);
+ }
+
+ /**
+ * Draw a filled ellipse in the destination image.
+ *
+ * @param dstPoints The points in the destination image representing the rectangle in which the
+ * ellipse is inscribed.
+ * @param color The color of the ellipse. This is a scalar with BGR values (0-255)
+ * @param destination The destination image to draw onto. The image should be in the BGR color
+ * space.
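+ *
+ *     <p>A minimal usage sketch (the frame size and box corners are illustrative values):
+ *     <pre>{@code
+ *     var frame = new Mat(480, 640, CvType.CV_8UC3);
+ *     var box = new Point[] {
+ *       new Point(10, 10), new Point(110, 10), new Point(110, 60), new Point(10, 60)
+ *     };
+ *     // filled red (BGR) ellipse inscribed in the 100x50 box
+ *     VideoSimUtil.drawInscribedEllipse(box, new Scalar(0, 0, 255), frame);
+ *     }</pre>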
+ */
+ public static void drawInscribedEllipse(Point[] dstPoints, Scalar color, Mat destination) {
+ // create RotatedRect from points
+ var rect = OpenCVHelp.getMinAreaRect(dstPoints);
+ // inscribe ellipse inside rectangle
+ Imgproc.ellipse(destination, rect, color, -1, Imgproc.LINE_AA);
+ }
+
+ /**
+ * Draw a polygon outline or filled polygon to the destination image with the given points.
+ *
+ * @param dstPoints The points in the destination image representing the polygon.
+ * @param thickness The thickness of the outline in pixels. If this is not positive, a filled
+ * polygon is drawn instead.
+ * @param color The color drawn. This should match the color space of the destination image.
+ * @param isClosed If the last point should connect to the first point in the polygon outline.
+ * @param destination The destination image to draw onto.
+ */
+ public static void drawPoly(
+ Point[] dstPoints, int thickness, Scalar color, boolean isClosed, Mat destination) {
+ var dstPointsd = new MatOfPoint(dstPoints);
+ if (thickness > 0) {
+ Imgproc.polylines(
+ destination, List.of(dstPointsd), isClosed, color, thickness, Imgproc.LINE_AA);
+ } else {
+ Imgproc.fillPoly(destination, List.of(dstPointsd), color, Imgproc.LINE_AA);
+ }
+ }
+
/**
* Draws a contour around the given points and text of the id onto the destination image.
*
@@ -300,31 +361,242 @@ public static void warp16h5TagImage(
* @param destination The destination image to draw onto. The image should be in the BGR color
* space.
*/
- public static void drawTagDetection(int id, MatOfPoint2f dstPoints, Mat destination) {
- var dstPointsd = new MatOfPoint(dstPoints.toArray());
- double scaleX = destination.width() / 640.0;
- double scaleY = destination.height() / 480.0;
- double minScale = Math.min(scaleX, scaleY);
- int thickness = (int) (1 * minScale);
- // for(var pt : dstPoints.toArray()) {
- // Imgproc.circle(destination, pt, 4, new Scalar(255), 1, Imgproc.LINE_AA);
- // }
- // Imgproc.rectangle(destination, extremeRect, new Scalar(255), 1, Imgproc.LINE_AA);
- // Imgproc.rectangle(destination, Imgproc.boundingRect(dstPoints), new Scalar(255), 1,
- // Imgproc.LINE_AA);
- Imgproc.polylines(
- destination, List.of(dstPointsd), true, new Scalar(0, 0, 255), thickness, Imgproc.LINE_AA);
- var textPt = Imgproc.boundingRect(dstPoints).tl();
- textPt.x -= 10.0 * scaleX;
- textPt.y -= 12.0 * scaleY;
+ public static void drawTagDetection(int id, Point[] dstPoints, Mat destination) {
+ double thickness = getScaledThickness(1, destination);
+ drawPoly(dstPoints, (int) thickness, new Scalar(0, 0, 255), true, destination);
+ var rect = Imgproc.boundingRect(new MatOfPoint(dstPoints));
+ var textPt = new Point(rect.x + rect.width, rect.y);
+ textPt.x += thickness;
+ textPt.y += thickness;
Imgproc.putText(
destination,
String.valueOf(id),
textPt,
Imgproc.FONT_HERSHEY_PLAIN,
- 1.5 * minScale,
- new Scalar(0, 0, 255),
- thickness,
+ 1.5 * thickness,
+ new Scalar(0, 200, 0),
+ (int) thickness,
Imgproc.LINE_AA);
}
+
+ /**
+ * Set the field dimensions that are used for drawing the field wireframe.
+ *
+ * @param fieldLengthMeters The length of the field, in meters
+ * @param fieldWidthMeters The width of the field, in meters
+ */
+ public static void setFieldDimensionsMeters(double fieldLengthMeters, double fieldWidthMeters) {
+ fieldLength = fieldLengthMeters;
+ fieldWidth = fieldWidthMeters;
+ }
+
+ /**
+ * The translations used to draw the field side walls and driver station walls. It is a List of
+ * Lists because the translations are not all connected.
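+ *
+ *     <p>For example, the first entry built below is the closed floor outline (a sketch of the
+ *     returned shape; L and W are the field length and width):
+ *     <pre>{@code
+ *     List<List<Translation3d>> walls = getFieldWallLines();
+ *     // walls.get(0): (0,0,0) -> (L,0,0) -> (L,W,0) -> (0,W,0) -> (0,0,0)
+ *     }</pre>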
+ */ + private static List> getFieldWallLines() { + var list = new ArrayList>(); + + final double sideHt = Units.inchesToMeters(19.5); + final double driveHt = Units.inchesToMeters(35); + final double topHt = Units.inchesToMeters(78); + + // field floor + list.add( + List.of( + new Translation3d(0, 0, 0), + new Translation3d(fieldLength, 0, 0), + new Translation3d(fieldLength, fieldWidth, 0), + new Translation3d(0, fieldWidth, 0), + new Translation3d(0, 0, 0))); + // right side wall + list.add( + List.of( + new Translation3d(0, 0, 0), + new Translation3d(0, 0, sideHt), + new Translation3d(fieldLength, 0, sideHt), + new Translation3d(fieldLength, 0, 0))); + // red driverstation + list.add( + List.of( + new Translation3d(fieldLength, 0, sideHt), + new Translation3d(fieldLength, 0, topHt), + new Translation3d(fieldLength, fieldWidth, topHt), + new Translation3d(fieldLength, fieldWidth, sideHt))); + list.add( + List.of( + new Translation3d(fieldLength, 0, driveHt), + new Translation3d(fieldLength, fieldWidth, driveHt))); + // left side wall + list.add( + List.of( + new Translation3d(0, fieldWidth, 0), + new Translation3d(0, fieldWidth, sideHt), + new Translation3d(fieldLength, fieldWidth, sideHt), + new Translation3d(fieldLength, fieldWidth, 0))); + // blue driverstation + list.add( + List.of( + new Translation3d(0, 0, sideHt), + new Translation3d(0, 0, topHt), + new Translation3d(0, fieldWidth, topHt), + new Translation3d(0, fieldWidth, sideHt))); + list.add(List.of(new Translation3d(0, 0, driveHt), new Translation3d(0, fieldWidth, driveHt))); + + return list; + } + + /** + * The translations used to draw the field floor subdivisions (not the floor outline). It is a + * List of Lists because the translations are not all connected. + * + * @param subdivisions How many "subdivisions" along the width/length of the floor. E.g. 3 + * subdivisions would mean 2 lines along the length and 2 lines along the width creating a 3x3 + * "grid". + */ + private static List> getFieldFloorLines(int subdivisions) { + var list = new ArrayList>(); + final double subLength = fieldLength / subdivisions; + final double subWidth = fieldWidth / subdivisions; + + // field floor subdivisions + for (int i = 0; i < subdivisions; i++) { + list.add( + List.of( + new Translation3d(0, subWidth * (i + 1), 0), + new Translation3d(fieldLength, subWidth * (i + 1), 0))); + list.add( + List.of( + new Translation3d(subLength * (i + 1), 0, 0), + new Translation3d(subLength * (i + 1), fieldWidth, 0))); + } + + return list; + } + + /** + * Convert 3D lines represented by the given series of translations into a polygon(s) in the + * camera's image. + * + * @param camRt The change in basis from world coordinates to camera coordinates. See {@link + * RotTrlTransform3d#makeRelativeTo(Pose3d)}. + * @param prop The simulated camera's properties. + * @param trls A sequential series of translations defining the polygon to be drawn. + * @param resolution Resolution as a fraction(0 - 1) of the video frame's diagonal length in + * pixels. Line segments will be subdivided if they exceed this resolution. + * @param isClosed If the final translation should also draw a line to the first translation. + * @param destination The destination image that is being drawn to. 
+ * @return A list of polygons(which are an array of points)
+ */
+ public static List<Point[]> polyFrom3dLines(
+ RotTrlTransform3d camRt,
+ SimCameraProperties prop,
+ List<Translation3d> trls,
+ double resolution,
+ boolean isClosed,
+ Mat destination) {
+ resolution = Math.hypot(destination.size().height, destination.size().width) * resolution;
+ List<Translation3d> pts = new ArrayList<>(trls);
+ if (isClosed) pts.add(pts.get(0));
+ List<Point[]> polyPointList = new ArrayList<>();
+
+ for (int i = 0; i < pts.size() - 1; i++) {
+ var pta = pts.get(i);
+ var ptb = pts.get(i + 1);
+
+ // check if line is inside camera frustum
+ var inter = prop.getVisibleLine(camRt, pta, ptb);
+ if (inter.getSecond() == null) continue;
+
+ // cull line to the inside of the camera frustum
+ double inter1 = inter.getFirst().doubleValue();
+ double inter2 = inter.getSecond().doubleValue();
+ var baseDelta = ptb.minus(pta);
+ var old_pta = pta;
+ if (inter1 > 0) pta = old_pta.plus(baseDelta.times(inter1));
+ if (inter2 < 1) ptb = old_pta.plus(baseDelta.times(inter2));
+ baseDelta = ptb.minus(pta);
+
+ // project points into 2d
+ var poly = new ArrayList<Point>();
+ poly.addAll(
+ Arrays.asList(
+ OpenCVHelp.projectPoints(
+ prop.getIntrinsics(), prop.getDistCoeffs(), camRt, List.of(pta, ptb))));
+ var pxa = poly.get(0);
+ var pxb = poly.get(1);
+
+ // subdivide projected line based on desired resolution
+ double pxDist = Math.hypot(pxb.x - pxa.x, pxb.y - pxa.y);
+ int subdivisions = (int) (pxDist / resolution);
+ var subDelta = baseDelta.div(subdivisions + 1);
+ var subPts = new ArrayList<Translation3d>();
+ for (int j = 0; j < subdivisions; j++) {
+ subPts.add(pta.plus(subDelta.times(j + 1)));
+ }
+ if (subPts.size() > 0) {
+ poly.addAll(
+ 1,
+ Arrays.asList(
+ OpenCVHelp.projectPoints(
+ prop.getIntrinsics(), prop.getDistCoeffs(), camRt, subPts)));
+ }
+
+ polyPointList.add(poly.toArray(Point[]::new));
+ }
+
+ return polyPointList;
+ }
+
+ /**
+ * Draw a wireframe of the field to the given image.
+ *
+ * @param camRt The change in basis from world coordinates to camera coordinates. See {@link
+ * RotTrlTransform3d#makeRelativeTo(Pose3d)}.
+ * @param prop The simulated camera's properties.
+ * @param resolution Resolution as a fraction(0 - 1) of the video frame's diagonal length in
+ * pixels. Line segments will be subdivided if they exceed this resolution.
+ * @param wallThickness Thickness of the lines used for drawing the field walls in pixels. This is
+ * scaled by {@link #getScaledThickness(double, Mat)}.
+ * @param wallColor Color of the lines used for drawing the field walls.
+ * @param floorSubdivisions An NxN "grid" is created from the floor where this parameter is N,
+ * which defines the floor lines.
+ * @param floorThickness Thickness of the lines used for drawing the field floor grid in pixels.
+ * This is scaled by {@link #getScaledThickness(double, Mat)}.
+ * @param floorColor Color of the lines used for drawing the field floor grid.
+ * @param destination The destination image to draw to.
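+ *
+ *     <p>A minimal usage sketch, assuming an existing camera Pose3d, SimCameraProperties prop,
+ *     and BGR frame Mat (the colors, thicknesses, and resolution fraction are illustrative):
+ *     <pre>{@code
+ *     var camRt = RotTrlTransform3d.makeRelativeTo(cameraPose);
+ *     VideoSimUtil.drawFieldWireframe(
+ *         camRt, prop, 0.01, 1.5, new Scalar(80, 80, 80), 6, 1.0, new Scalar(30, 30, 30), frame);
+ *     }</pre>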
+ */ + public static void drawFieldWireframe( + RotTrlTransform3d camRt, + SimCameraProperties prop, + double resolution, + double wallThickness, + Scalar wallColor, + int floorSubdivisions, + double floorThickness, + Scalar floorColor, + Mat destination) { + for (var trls : getFieldFloorLines(floorSubdivisions)) { + var polys = VideoSimUtil.polyFrom3dLines(camRt, prop, trls, resolution, false, destination); + for (var poly : polys) { + drawPoly( + poly, + (int) Math.round(getScaledThickness(floorThickness, destination)), + floorColor, + false, + destination); + } + } + for (var trls : getFieldWallLines()) { + var polys = VideoSimUtil.polyFrom3dLines(camRt, prop, trls, resolution, false, destination); + for (var poly : polys) { + drawPoly( + poly, + (int) Math.round(getScaledThickness(wallThickness, destination)), + wallColor, + false, + destination); + } + } + } } diff --git a/photon-lib/src/main/java/org/photonvision/simulation/VisionSystemSim.java b/photon-lib/src/main/java/org/photonvision/simulation/VisionSystemSim.java index 81736d2dde..4e65bdcbd4 100644 --- a/photon-lib/src/main/java/org/photonvision/simulation/VisionSystemSim.java +++ b/photon-lib/src/main/java/org/photonvision/simulation/VisionSystemSim.java @@ -130,7 +130,8 @@ public boolean removeCamera(PhotonCameraSim cameraSim) { * Get a simulated camera's position relative to the robot. If the requested camera is invalid, an * empty optional is returned. * - * @return The transform of this cameraSim, or an empty optional if it is invalid + * @param cameraSim The specific camera to get the robot-to-camera transform of + * @return The transform of this camera, or an empty optional if it is invalid */ public Optional getRobotToCamera(PhotonCameraSim cameraSim) { return getRobotToCamera(cameraSim, Timer.getFPGATimestamp()); @@ -140,9 +141,9 @@ public Optional getRobotToCamera(PhotonCameraSim cameraSim) { * Get a simulated camera's position relative to the robot. If the requested camera is invalid, an * empty optional is returned. * - * @param cameraSim Specific camera to get the robot-to-camera transform of + * @param cameraSim The specific camera to get the robot-to-camera transform of * @param timeSeconds Timestamp in seconds of when the transform should be observed - * @return The transform of this cameraSim, or an empty optional if it is invalid + * @return The transform of this camera, or an empty optional if it is invalid */ public Optional getRobotToCamera(PhotonCameraSim cameraSim, double timeSeconds) { var trfBuffer = camTrfMap.get(cameraSim); @@ -152,6 +153,31 @@ public Optional getRobotToCamera(PhotonCameraSim cameraSim, double return Optional.of(new Transform3d(new Pose3d(), sample.orElse(new Pose3d()))); } + /** + * Get a simulated camera's position on the field. If the requested camera is invalid, an empty + * optional is returned. + * + * @param cameraSim The specific camera to get the field pose of + * @return The pose of this camera, or an empty optional if it is invalid + */ + public Optional getCameraPose(PhotonCameraSim cameraSim) { + return getCameraPose(cameraSim, Timer.getFPGATimestamp()); + } + + /** + * Get a simulated camera's position on the field. If the requested camera is invalid, an empty + * optional is returned. 
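+ * <p>For example, a sketch of publishing this pose to a Field2d for debugging (the visionSim
+ * and field objects are assumed to exist):
+ * <pre>{@code
+ * visionSim.getCameraPose(cameraSim)
+ *     .ifPresent(pose -> field.getObject("camera").setPose(pose.toPose2d()));
+ * }</pre>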
+ *
+ * @param cameraSim The specific camera to get the field pose of
+ * @param timeSeconds Timestamp in seconds of when the pose should be observed
+ * @return The pose of this camera, or an empty optional if it is invalid
+ */
+ public Optional<Pose3d> getCameraPose(PhotonCameraSim cameraSim, double timeSeconds) {
+ var robotToCamera = getRobotToCamera(cameraSim, timeSeconds);
+ if (robotToCamera.isEmpty()) return Optional.empty();
+ return Optional.of(getRobotPose(timeSeconds).plus(robotToCamera.get()));
+ }
+
/**
* Adjust a camera's position relative to the robot. Use this if your camera is on a gimbal or
* turret or some other mobile platform.
@@ -217,15 +243,16 @@ public void addVisionTargets(VisionTargetSim... targets) {
* PhotonCamera}s simulated from this system will report the location of the camera relative to
* the subset of these targets which are visible from the given camera position.
*
- * <p>The AprilTags from this layout will be added as vision targets under the type "apriltags".
- * The poses added preserve the tag layout's current alliance origin.
+ * <p>
    The AprilTags from this layout will be added as vision targets under the type "apriltag". + * The poses added preserve the tag layout's current alliance origin. If the tag layout's alliance + * origin is changed, these added tags will have to be cleared and re-added. * * @param tagLayout The field tag layout to get Apriltag poses and IDs from */ - public void addVisionTargets(AprilTagFieldLayout tagLayout) { + public void addAprilTags(AprilTagFieldLayout tagLayout) { for (AprilTag tag : tagLayout.getTags()) { addVisionTargets( - "apriltags", + "apriltag", new VisionTargetSim( tagLayout.getTagPose(tag.ID).get(), // preserve alliance rotation TargetModel.kTag16h5, @@ -252,6 +279,10 @@ public void clearVisionTargets() { targetSets.clear(); } + public void clearAprilTags() { + removeVisionTargets("apriltag"); + } + public Set removeVisionTargets(String type) { return targetSets.remove(type); } diff --git a/photon-lib/src/main/native/cpp/photonlib/PhotonCamera.cpp b/photon-lib/src/main/native/cpp/photonlib/PhotonCamera.cpp index 7988774324..d0901b10bd 100644 --- a/photon-lib/src/main/native/cpp/photonlib/PhotonCamera.cpp +++ b/photon-lib/src/main/native/cpp/photonlib/PhotonCamera.cpp @@ -115,9 +115,7 @@ void PhotonCamera::TakeOutputSnapshot() { bool PhotonCamera::GetDriverMode() const { return driverModeSubscriber.Get(); } -void PhotonCamera::SetPipelineIndex(int index) { - pipelineIndexPub.Set(static_cast(index)); -} +void PhotonCamera::SetPipelineIndex(int index) { pipelineIndexPub.Set(index); } int PhotonCamera::GetPipelineIndex() const { return static_cast(pipelineIndexSub.Get()); @@ -137,7 +135,7 @@ std::optional PhotonCamera::GetCameraMatrix() { } void PhotonCamera::SetLEDMode(LEDMode mode) { - ledModePub.Set(static_cast(static_cast(mode))); + ledModePub.Set(static_cast(mode)); } const std::string_view PhotonCamera::GetCameraName() const { diff --git a/photon-lib/src/main/native/cpp/photonlib/PhotonPoseEstimator.cpp b/photon-lib/src/main/native/cpp/photonlib/PhotonPoseEstimator.cpp index c2dc8bac50..9282e3bdd5 100644 --- a/photon-lib/src/main/native/cpp/photonlib/PhotonPoseEstimator.cpp +++ b/photon-lib/src/main/native/cpp/photonlib/PhotonPoseEstimator.cpp @@ -115,7 +115,7 @@ std::optional PhotonPoseEstimator::Update( std::optional PhotonPoseEstimator::Update( const PhotonPipelineResult& result, std::optional cameraMatrixData, - std::optional coeffsData) { + std::optional cameraDistCoeffs) { // Time in the past -- give up, since the following if expects times > 0 if (result.GetTimestamp() < 0_s) { return std::nullopt; @@ -136,12 +136,12 @@ std::optional PhotonPoseEstimator::Update( return std::nullopt; } - return Update(result, cameraMatrixData, coeffsData, this->strategy); + return Update(result, cameraMatrixData, cameraDistCoeffs, this->strategy); } std::optional PhotonPoseEstimator::Update( PhotonPipelineResult result, std::optional cameraMatrixData, - std::optional coeffsData, PoseStrategy strategy) { + std::optional cameraDistCoeffs, PoseStrategy strategy) { std::optional ret = std::nullopt; switch (strategy) { @@ -162,7 +162,7 @@ std::optional PhotonPoseEstimator::Update( ret = AverageBestTargetsStrategy(result); break; case ::photonlib::MULTI_TAG_PNP: - ret = MultiTagPnpStrategy(result, coeffsData, cameraMatrixData); + ret = MultiTagPnpStrategy(result, cameraMatrixData, cameraDistCoeffs); break; default: FRC_ReportError(frc::warn::Warning, "Invalid Pose Strategy selected!", @@ -396,14 +396,14 @@ std::optional PhotonPoseEstimator::MultiTagPnpStrategy( return Update(result, 
camMat, distCoeffs, this->multiTagFallbackStrategy); } - // Use OpenCV ITERATIVE solver + // Output mats for results cv::Mat const rvec(3, 1, cv::DataType::type); cv::Mat const tvec(3, 1, cv::DataType::type); cv::solvePnP(objectPoints, imagePoints, camMat.value(), distCoeffs.value(), rvec, tvec, false, cv::SOLVEPNP_SQPNP); - Pose3d const pose = detail::ToPose3d(tvec, rvec); + const Pose3d pose = detail::ToPose3d(tvec, rvec); return photonlib::EstimatedRobotPose( pose.TransformBy(m_robotToCamera.Inverse()), result.GetTimestamp(), diff --git a/photon-lib/src/test/java/org/photonvision/OpenCVTest.java b/photon-lib/src/test/java/org/photonvision/OpenCVTest.java index 594439e847..e483833111 100644 --- a/photon-lib/src/test/java/org/photonvision/OpenCVTest.java +++ b/photon-lib/src/test/java/org/photonvision/OpenCVTest.java @@ -48,6 +48,7 @@ import org.opencv.core.Core; import org.photonvision.estimation.CameraTargetRelation; import org.photonvision.estimation.OpenCVHelp; +import org.photonvision.estimation.RotTrlTransform3d; import org.photonvision.estimation.TargetModel; import org.photonvision.simulation.SimCameraProperties; import org.photonvision.simulation.VisionSystemSim; @@ -143,36 +144,46 @@ public void testProjection() { new VisionTargetSim( new Pose3d(1, 0, 0, new Rotation3d(0, 0, Math.PI)), TargetModel.kTag16h5, 0); var cameraPose = new Pose3d(0, 0, 0, new Rotation3d()); - var targetCorners = + var camRt = RotTrlTransform3d.makeRelativeTo(cameraPose); + var imagePoints = OpenCVHelp.projectPoints( - prop.getIntrinsics(), prop.getDistCoeffs(), cameraPose, target.getFieldVertices()); + prop.getIntrinsics(), prop.getDistCoeffs(), camRt, target.getFieldVertices()); + // find circulation (counter/clockwise-ness) double circulation = 0; - for (int i = 0; i < targetCorners.size(); i++) { - double xDiff = targetCorners.get((i + 1) % 4).x - targetCorners.get(i).x; - double ySum = targetCorners.get((i + 1) % 4).y + targetCorners.get(i).y; + for (int i = 0; i < imagePoints.length; i++) { + double xDiff = imagePoints[(i + 1) % 4].x - imagePoints[i].x; + double ySum = imagePoints[(i + 1) % 4].y + imagePoints[i].y; circulation += xDiff * ySum; } assertTrue(circulation > 0, "2d fiducial points aren't counter-clockwise"); + // undo projection distortion - targetCorners = prop.undistort(targetCorners); - var avgCenterRot1 = prop.getPixelRot(OpenCVHelp.averageCorner(targetCorners)); + imagePoints = + OpenCVHelp.undistortPoints(prop.getIntrinsics(), prop.getDistCoeffs(), imagePoints); + + // test projection results after moving camera + var avgCenterRot1 = prop.getPixelRot(OpenCVHelp.avgPoint(imagePoints)); cameraPose = cameraPose.plus(new Transform3d(new Translation3d(), new Rotation3d(0, 0.25, 0.25))); - targetCorners = + camRt = RotTrlTransform3d.makeRelativeTo(cameraPose); + imagePoints = OpenCVHelp.projectPoints( - prop.getIntrinsics(), prop.getDistCoeffs(), cameraPose, target.getFieldVertices()); - var avgCenterRot2 = prop.getPixelRot(OpenCVHelp.averageCorner(targetCorners)); + prop.getIntrinsics(), prop.getDistCoeffs(), camRt, target.getFieldVertices()); + var avgCenterRot2 = prop.getPixelRot(OpenCVHelp.avgPoint(imagePoints)); + var yaw2d = new Rotation2d(avgCenterRot2.getZ()); var pitch2d = new Rotation2d(avgCenterRot2.getY()); var yawDiff = yaw2d.minus(new Rotation2d(avgCenterRot1.getZ())); var pitchDiff = pitch2d.minus(new Rotation2d(avgCenterRot1.getY())); assertTrue(yawDiff.getRadians() < 0, "2d points don't follow yaw"); assertTrue(pitchDiff.getRadians() < 0, "2d points don't follow 
pitch");
+
var actualRelation = new CameraTargetRelation(cameraPose, target.getPose());
assertEquals(
actualRelation.camToTargPitch.getDegrees(),
- pitchDiff.getDegrees() * Math.cos(yaw2d.getRadians()), // adjust for perpsective distortion
+ pitchDiff.getDegrees()
+ * Math.cos(yaw2d.getRadians()), // adjust for unaccounted perspective distortion
kRotDeltaDeg,
"2d pitch doesn't match 3d");
assertEquals(
@@ -184,23 +195,31 @@
@Test
public void testSolvePNP_SQUARE() {
+ // square AprilTag target
var target =
new VisionTargetSim(
new Pose3d(5, 0.5, 1, new Rotation3d(0, 0, Math.PI)), TargetModel.kTag16h5, 0);
var cameraPose = new Pose3d(0, 0, 0, new Rotation3d());
- var actualRelation = new CameraTargetRelation(cameraPose, target.getPose());
+ var camRt = RotTrlTransform3d.makeRelativeTo(cameraPose);
+ // target relative to camera
+ var relTarget = camRt.apply(target.getPose());
+
+ // simulate solvePNP estimation
var targetCorners =
OpenCVHelp.projectPoints(
- prop.getIntrinsics(), prop.getDistCoeffs(), cameraPose, target.getFieldVertices());
+ prop.getIntrinsics(), prop.getDistCoeffs(), camRt, target.getFieldVertices());
var pnpSim =
OpenCVHelp.solvePNP_SQUARE(
prop.getIntrinsics(), prop.getDistCoeffs(), target.getModel().vertices, targetCorners);
- var estRelation = new CameraTargetRelation(cameraPose, cameraPose.plus(pnpSim.best));
- assertSame(actualRelation.camToTarg, estRelation.camToTarg);
+
+ // check solvePNP estimation accuracy
+ assertSame(relTarget.getRotation(), pnpSim.best.getRotation());
+ assertSame(relTarget.getTranslation(), pnpSim.best.getTranslation());
}

@Test
public void testSolvePNP_SQPNP() {
+ // (for targets with an arbitrary number of non-collinear points > 2)
var target =
new VisionTargetSim(
new Pose3d(5, 0.5, 1, new Rotation3d(0, 0, Math.PI)),
new Translation3d(-1, 0, 0))),
0);
var cameraPose = new Pose3d(0, 0, 0, new Rotation3d());
- var actualRelation = new CameraTargetRelation(cameraPose, target.getPose());
+ var camRt = RotTrlTransform3d.makeRelativeTo(cameraPose);
+ // target relative to camera
+ var relTarget = camRt.apply(target.getPose());
+
+ // simulate solvePNP estimation
var targetCorners =
OpenCVHelp.projectPoints(
- prop.getIntrinsics(), prop.getDistCoeffs(), cameraPose, target.getFieldVertices());
+ prop.getIntrinsics(), prop.getDistCoeffs(), camRt, target.getFieldVertices());
var pnpSim =
OpenCVHelp.solvePNP_SQPNP(
prop.getIntrinsics(), prop.getDistCoeffs(), target.getModel().vertices, targetCorners);
- var estRelation = new CameraTargetRelation(cameraPose, cameraPose.plus(pnpSim.best));
- assertSame(actualRelation.camToTarg, estRelation.camToTarg);
+
+ // check solvePNP estimation accuracy
+ assertSame(relTarget.getRotation(), pnpSim.best.getRotation());
+ assertSame(relTarget.getTranslation(), pnpSim.best.getTranslation());
}
}
diff --git a/photon-lib/src/test/java/org/photonvision/VisionSystemSimTest.java b/photon-lib/src/test/java/org/photonvision/VisionSystemSimTest.java
index 6f7720a666..5ab7d00ce2 100644
--- a/photon-lib/src/test/java/org/photonvision/VisionSystemSimTest.java
+++ b/photon-lib/src/test/java/org/photonvision/VisionSystemSimTest.java
@@ -348,7 +348,7 @@ private static Stream<Arguments> distCalCParamProvider() {
Arguments.of(20, -42, 0),
Arguments.of(5, -35, 2),
Arguments.of(6, -35, 0),
- Arguments.of(10, -34, 3.2),
+ Arguments.of(10, -34, 2.4),
Arguments.of(15, -33, 0),
Arguments.of(19.52, -15.98, 1.1));
}
@@ -376,23 +376,28 @@ public void
testDistanceCalc(double testDist, double testPitch, double testHeigh cameraSim.prop.setCalibration(640, 480, Rotation2d.fromDegrees(160)); cameraSim.setMinTargetAreaPixels(0.0); visionSysSim.adjustCamera(cameraSim, robotToCamera); - // note that non-fiducial targets have different center point calculation and will - // return slightly inaccurate yaw/pitch values visionSysSim.addVisionTargets(new VisionTargetSim(targetPose, new TargetModel(0.5, 0.5), 0)); visionSysSim.update(robotPose); + + // Note that target 2d yaw/pitch accuracy is hindered by two factors in photonvision: + // 1. These are calculated with the average of the minimum area rectangle, which does not + // actually find the target center because of perspective distortion. + // 2. Yaw and pitch are calculated separately which gives incorrect pitch values. var res = camera.getLatestResult(); assertTrue(res.hasTargets()); var tgt = res.getBestTarget(); - assertEquals(0.0, tgt.getYaw(), kRotDeltaDeg); + assertEquals(0.0, tgt.getYaw(), 0.5); + // Distance calculation using this trigonometry may be wildly incorrect when + // there is not much height difference between the target and the camera. double distMeas = PhotonUtils.calculateDistanceToTargetMeters( robotToCamera.getZ(), targetPose.getZ(), Units.degreesToRadians(-testPitch), Units.degreesToRadians(tgt.getPitch())); - assertEquals(Units.feetToMeters(testDist), distMeas, kTrlDelta); + assertEquals(Units.feetToMeters(testDist), distMeas, 0.15); } @Test diff --git a/photonlib-java-examples/apriltagExample/src/main/java/frc/robot/Drivetrain.java b/photonlib-java-examples/apriltagExample/src/main/java/frc/robot/Drivetrain.java index f33d9f0510..dba9ee3bca 100644 --- a/photonlib-java-examples/apriltagExample/src/main/java/frc/robot/Drivetrain.java +++ b/photonlib-java-examples/apriltagExample/src/main/java/frc/robot/Drivetrain.java @@ -142,7 +142,7 @@ public Drivetrain() { var cameraSim = new PhotonCameraSim(pcw.photonCamera, SimCameraProperties.PI4_LIFECAM_640_480()); m_visionSystemSim.addCamera(cameraSim, Constants.VisionConstants.robotToCam); - m_visionSystemSim.addVisionTargets(pcw.photonPoseEstimator.getFieldTags()); + m_visionSystemSim.addAprilTags(pcw.photonPoseEstimator.getFieldTags()); } }
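
A closing sketch tying the renamed simulation API together, mirroring the Drivetrain example
above (the "main" system name, the 2023 tag layout constant, and the camera/robotToCam/robotPose
variables are illustrative assumptions, not taken from this patch):

    // build the simulated vision system once, e.g. in robot init
    var visionSim = new VisionSystemSim("main");
    var cameraSim = new PhotonCameraSim(camera, SimCameraProperties.PI4_LIFECAM_640_480());
    visionSim.addCamera(cameraSim, robotToCam);
    // addVisionTargets(tagLayout) is now addAprilTags(tagLayout); tags are added under the "apriltag" type
    visionSim.addAprilTags(AprilTagFields.k2023ChargedUp.loadAprilTagLayoutField());
    // each simulation loop, feed in the robot's field pose
    visionSim.update(robotPose);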