diff --git a/photon-core/src/main/java/org/photonvision/common/util/math/MathUtils.java b/photon-core/src/main/java/org/photonvision/common/util/math/MathUtils.java index 526311afc5..2b6bb31ae3 100644 --- a/photon-core/src/main/java/org/photonvision/common/util/math/MathUtils.java +++ b/photon-core/src/main/java/org/photonvision/common/util/math/MathUtils.java @@ -17,10 +17,12 @@ package org.photonvision.common.util.math; -import edu.wpi.first.math.MatBuilder; -import edu.wpi.first.math.Nat; +import edu.wpi.first.apriltag.AprilTagPoseEstimate; import edu.wpi.first.math.VecBuilder; -import edu.wpi.first.math.geometry.*; +import edu.wpi.first.math.geometry.CoordinateSystem; +import edu.wpi.first.math.geometry.Pose3d; +import edu.wpi.first.math.geometry.Rotation3d; +import edu.wpi.first.math.geometry.Transform3d; import edu.wpi.first.math.util.Units; import edu.wpi.first.util.WPIUtilJNI; import java.util.Arrays; @@ -137,73 +139,46 @@ public static double lerp(double startValue, double endValue, double t) { return startValue + (endValue - startValue) * t; } - public static Pose3d EDNtoNWU(final Pose3d pose) { - // Change of basis matrix from EDN to NWU. Each column vector is one of the - // old basis vectors mapped to its representation in the new basis. - // - // E (+X) -> N (-Y), D (+Y) -> W (-Z), N (+Z) -> U (+X) - var R = new MatBuilder<>(Nat.N3(), Nat.N3()).fill(0, 0, 1, -1, 0, 0, 0, -1, 0); - - // https://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/ - double w = Math.sqrt(1.0 + R.get(0, 0) + R.get(1, 1) + R.get(2, 2)) / 2.0; - double x = (R.get(2, 1) - R.get(1, 2)) / (4.0 * w); - double y = (R.get(0, 2) - R.get(2, 0)) / (4.0 * w); - double z = (R.get(1, 0) - R.get(0, 1)) / (4.0 * w); - var rotationQuat = new Rotation3d(new Quaternion(w, x, y, z)); - - return new Pose3d( - pose.getTranslation().rotateBy(rotationQuat), pose.getRotation().rotateBy(rotationQuat)); - } - /** - * All our solvepnp code returns a tag with X left, Y up, and Z out of the tag To better match - * wpilib, we want to apply another rotation so that we get Z up, X out of the tag, and Y to the - * right. We apply the following change of basis: X -> Y Y -> Z Z -> X + * OpenCV uses the EDN coordinate system, but WPILib uses NWU. Converts a camera-to-target + * transformation from EDN to NWU. + * + *

+   * <p>Note: The detected target's rvec and tvec perform a rotation-translation transformation
+   * which converts points in the target's coordinate system to the camera's. This means applying
+   * the transformation to the target point (0,0,0) for example would give the target's center
+   * relative to the camera. Conveniently, if we make a translation-rotation transformation out of
+   * these components instead, we get the transformation from the camera to the target.
+   *
+   * @param cameraToTarget3d A camera-to-target Transform3d in EDN.
+   * @return A camera-to-target Transform3d in NWU.
    */
-  private static final Rotation3d WPILIB_BASE_ROTATION =
-      new Rotation3d(new MatBuilder<>(Nat.N3(), Nat.N3()).fill(0, 1, 0, 0, 0, 1, 1, 0, 0));
-
   public static Transform3d convertOpenCVtoPhotonTransform(Transform3d cameraToTarget3d) {
     // TODO: Refactor into new pipe?
-    // CameraToTarget _should_ be in opencv-land EDN
-    var nwu =
-        CoordinateSystem.convert(
-            new Pose3d().transformBy(cameraToTarget3d),
-            CoordinateSystem.EDN(),
-            CoordinateSystem.NWU());
-    return new Transform3d(nwu.getTranslation(), WPILIB_BASE_ROTATION.rotateBy(nwu.getRotation()));
-  }
-
-  public static Pose3d convertOpenCVtoPhotonPose(Transform3d cameraToTarget3d) {
-    // TODO: Refactor into new pipe?
-    // CameraToTarget _should_ be in opencv-land EDN
-    var nwu =
-        CoordinateSystem.convert(
-            new Pose3d().transformBy(cameraToTarget3d),
-            CoordinateSystem.EDN(),
-            CoordinateSystem.NWU());
-    return new Pose3d(nwu.getTranslation(), WPILIB_BASE_ROTATION.rotateBy(nwu.getRotation()));
+    return CoordinateSystem.convert(
+        cameraToTarget3d, CoordinateSystem.EDN(), CoordinateSystem.NWU());
   }

   /*
-   * The AprilTag pose rotation outputs are X left, Y down, Z away from the tag
-   * with the tag facing
-   * the camera upright and the camera facing the target parallel to the floor.
-   * But our OpenCV
-   * solvePNP code would have X left, Y up, Z towards the camera with the target
-   * facing the camera
-   * and both parallel to the floor. So we apply a base rotation to the rotation
-   * component of the
-   * apriltag pose to make it consistent with the EDN system that OpenCV uses,
-   * internally a 180
-   * rotation about the X axis
+   * From the AprilTag repo:
+   * "The coordinate system has the origin at the camera center. The z-axis points from the camera
+   * center out the camera lens. The x-axis is to the right in the image taken by the camera, and
+   * y is down. The tag's coordinate frame is centered at the center of the tag, with x-axis to the
+   * right, y-axis down, and z-axis into the tag."
+   *
+   * This means our detected transformation will be in EDN. Our subsequent uses of the transformation,
+   * however, assume the tag's z-axis points away from the tag instead of into it. This means we
+   * have to correct the transformation's rotation.
    */
   private static final Rotation3d APRILTAG_BASE_ROTATION =
-      new Rotation3d(VecBuilder.fill(1, 0, 0), Units.degreesToRadians(180));
+      new Rotation3d(VecBuilder.fill(0, 1, 0), Units.degreesToRadians(180));

   /**
-   * Apply a 180-degree rotation about X to the rotation component of a given Apriltag pose. This
-   * aligns it with the OpenCV poses we use in other places.
+   * AprilTag returns a camera-to-tag transform in EDN, but the tag's z-axis points into the tag
+   * instead of away from it and towards the camera. This means we have to correct the
+   * transformation's rotation.
+   *
+   * @param pose The Transform3d with translation and rotation directly from the {@link
+   *     AprilTagPoseEstimate}.
+   * @return The same transform, with its rotation corrected so the tag's z-axis points out of the
+   *     tag, towards the camera.
   */
  public static Transform3d convertApriltagtoOpenCV(Transform3d pose) {
    var ocvRotation = APRILTAG_BASE_ROTATION.rotateBy(pose.getRotation());
diff --git a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/CornerDetectionPipe.java b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/CornerDetectionPipe.java
index ff7f6fd3ea..bed3c6b5ef 100644
--- a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/CornerDetectionPipe.java
+++ b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/CornerDetectionPipe.java
@@ -17,7 +17,6 @@ package org.photonvision.vision.pipe.impl;
-import edu.wpi.first.math.geometry.Translation2d;
 import java.util.*;
 import org.opencv.core.MatOfPoint2f;
 import org.opencv.core.Point;
@@ -25,6 +24,10 @@ import org.photonvision.vision.pipe.CVPipe;
 import org.photonvision.vision.target.TrackedTarget;
 
+/**
+ * Determines the target corners of the {@link TrackedTarget}. The {@link
+ * CornerDetectionPipeParameters} affect how these corners are calculated.
+ */
 public class CornerDetectionPipe
     extends CVPipe<
         List<TrackedTarget>,
@@ -49,15 +52,15 @@ protected List<TrackedTarget> process(List<TrackedTarget> targetList) {
   }
 
   /**
-   * @param target the target to find the corners of.
-   * @return the corners. left top, left bottom, right bottom, right top
+   * @param target The target to find the corners of.
+   * @return Corners: (bottom-left, bottom-right, top-right, top-left)
    */
   private List<Point> findBoundingBoxCorners(TrackedTarget target) {
     // extract the corners
     var points = new Point[4];
     target.m_mainContour.getMinAreaRect().points(points);
-    // find the tl/tr/bl/br corners
+    // find the bl/br/tr/tl corners
     // first, min by left/right
     var list_ = Arrays.asList(points);
     list_.sort(leftRightComparator);
@@ -68,13 +71,12 @@ private List<Point> findBoundingBoxCorners(TrackedTarget target) {
     var right = new ArrayList<>(List.of(list_.get(2), list_.get(3)));
     right.sort(verticalComparator);
 
-    // tl tr bl br
     var tl = left.get(0);
     var bl = left.get(1);
     var tr = right.get(0);
     var br = right.get(1);
 
-    return List.of(tl, bl, br, tr);
+    return List.of(bl, br, tr, tl);
   }
 
   /**
@@ -83,31 +85,22 @@ private List<Point> findBoundingBoxCorners(TrackedTarget target) {
    * @param a First point.
    * @param b Second point.
    * @return The straight line distance between them.
    */
   private static double distanceBetween(Point a, Point b) {
-    return Math.sqrt(Math.pow(a.x - b.x, 2) + Math.pow(a.y - b.y, 2));
-  }
-
-  /**
-   * @param a First point.
-   * @param b Second point.
-   * @return The straight line distance between them.
-   */
-  private static double distanceBetween(Translation2d a, Translation2d b) {
-    return Math.sqrt(Math.pow(a.getX() - b.getX(), 2) + Math.pow(a.getY() - b.getY(), 2));
+    double xDelta = a.x - b.x;
+    double yDelta = a.y - b.y;
+    return Math.sqrt(xDelta * xDelta + yDelta * yDelta);
   }
 
   /**
-   * Find the 4 most extreme corners,
+   * Find the 4 most extreme corners of the target's contour.
    *
-   * @param target the target to track.
-   * @param convexHull weather to use the convex hull of the target.
-   * @return the 4 extreme corners of the contour.
+   * @param target The target to track.
+   * @param convexHull Whether to use the convex hull of the contour instead.
+   * @return The 4 extreme corners of the contour: (bottom-left, bottom-right, top-right, top-left)
    */
   private List<Point> detectExtremeCornersByApproxPolyDp(TrackedTarget target, boolean convexHull) {
     var centroid = target.getMinAreaRect().center;
-    Comparator<Point> distanceProvider =
-        Comparator.comparingDouble(
-            (Point point) ->
-                Math.sqrt(Math.pow(centroid.x - point.x, 2) + Math.pow(centroid.y - point.y, 2)));
+    Comparator<Point> compareCenterDist =
+        Comparator.comparingDouble((Point point) -> distanceBetween(centroid, point));
 
     MatOfPoint2f targetContour;
     if (convexHull) {
@@ -141,17 +134,17 @@ private List<Point> detectExtremeCornersByApproxPolyDp(TrackedTarget target, boo
 
-    // left top, left bottom, right bottom, right top
+    // bottom-left, bottom-right, top-right, top-left
     var boundingBoxCorners = findBoundingBoxCorners(target);
 
-    var distanceToTlComparator =
-        Comparator.comparingDouble((Point p) -> distanceBetween(p, boundingBoxCorners.get(0)));
-
-    var distanceToTrComparator =
+    var compareDistToTl =
         Comparator.comparingDouble((Point p) -> distanceBetween(p, boundingBoxCorners.get(3)));
 
-    // top left and top right are the poly corners closest to the bounding box tl and tr
-    pointList.sort(distanceToTlComparator);
+    var compareDistToTr =
+        Comparator.comparingDouble((Point p) -> distanceBetween(p, boundingBoxCorners.get(2)));
+
+    // top left and top right are the poly corners closest to the bounding box tl and tr
+    pointList.sort(compareDistToTl);
     var tl = pointList.get(0);
     pointList.remove(tl);
-    pointList.sort(distanceToTrComparator);
+    pointList.sort(compareDistToTr);
     var tr = pointList.get(0);
     pointList.remove(tr);
@@ -177,11 +170,11 @@ private List<Point> detectExtremeCornersByApproxPolyDp(TrackedTarget target, boo
       }
     }
     if (leftList.isEmpty() || rightList.isEmpty()) return null;
-    leftList.sort(distanceProvider);
-    rightList.sort(distanceProvider);
+    leftList.sort(compareCenterDist);
+    rightList.sort(compareCenterDist);
     var bl = leftList.get(leftList.size() - 1);
     var br = rightList.get(rightList.size() - 1);
-    return List.of(tl, bl, br, tr);
+    return List.of(bl, br, tr, tl);
   }
 
   public static class CornerDetectionPipeParameters {
diff --git a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/Draw3dTargetsPipe.java b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/Draw3dTargetsPipe.java
index b88bd0ba17..99927ea0f1 100644
--- a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/Draw3dTargetsPipe.java
+++ b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/Draw3dTargetsPipe.java
@@ -127,7 +127,7 @@ protected Void process(Pair<Mat, List<TrackedTarget>> in) {
 
       // Draw X, Y and Z axis
       MatOfPoint3f pointMat = new MatOfPoint3f();
-      // Those points are in opencv-land, but we are in NWU
+      // OpenCV expects coordinates in EDN, but we want to visualize in NWU
       // NWU | EDN
       // X: Z
       // Y: -X
@@ -136,11 +136,15 @@
       var list =
           List.of(
               new Point3(0, 0, 0),
-              new Point3(0, 0, AXIS_LEN),
-              new Point3(AXIS_LEN, 0, 0),
-              new Point3(0, AXIS_LEN, 0));
+              new Point3(0, 0, AXIS_LEN), // x-axis
+              new Point3(-AXIS_LEN, 0, 0), // y-axis
+              new Point3(0, -AXIS_LEN, 0)); // z-axis
       pointMat.fromList(list);
 
+      // The detected target's rvec and tvec perform a rotation-translation transformation which
+      // converts points in the target's coordinate system to the camera's. This means applying
+      // the transformation to the target point (0,0,0) for example would give the target's center
+      // relative to the camera.
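An aside for reviewers: the rvec/tvec convention described in the comment above can be sanity-checked in isolation. A minimal sketch, assuming only WPILib's geometry types (the class and variable names here are illustrative, not part of this PR):

```java
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.geometry.Translation3d;

class RvecTvecConventionSketch {
  public static void main(String[] args) {
    // solvePnP's rvec/tvec map target-space points into camera space:
    //   p_camera = R * p_target + t
    // so the target origin (0,0,0) lands at t, the target's center in camera
    // space (EDN). Packing (t, R) into a translation-rotation Transform3d
    // therefore gives the camera-to-target transform directly.
    Translation3d tvec = new Translation3d(0, 0, 2); // hypothetical: 2 m out the lens (EDN +z)
    Rotation3d rvec = new Rotation3d(VecBuilder.fill(0, 1, 0), Math.toRadians(10));
    Transform3d camToTarget = new Transform3d(tvec, rvec);
    System.out.println(camToTarget.getTranslation()); // the tag's center relative to the camera
  }
}
```

The `Calib3d.projectPoints` call below hands the same rvec/tvec to OpenCV, which applies exactly this target-to-camera mapping to each axis endpoint before projecting through the camera intrinsics.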
       Calib3d.projectPoints(
           pointMat,
           target.getCameraRelativeRvec(),
@@ -152,19 +156,22 @@ protected Void process(Pair<Mat, List<TrackedTarget>> in) {
       var axisPoints = tempMat.toList();
       dividePointList(axisPoints);
 
-      // Red = x, green y, blue z
+      // XYZ is RGB
+      // y-axis = green
       Imgproc.line(
           in.getLeft(),
           axisPoints.get(0),
           axisPoints.get(2),
           ColorHelper.colorToScalar(Color.GREEN),
           3);
+      // z-axis = blue
       Imgproc.line(
           in.getLeft(),
           axisPoints.get(0),
           axisPoints.get(3),
           ColorHelper.colorToScalar(Color.BLUE),
           3);
+      // x-axis = red
       Imgproc.line(
           in.getLeft(),
           axisPoints.get(0),
@@ -172,6 +179,7 @@ protected Void process(Pair<Mat, List<TrackedTarget>> in) {
           ColorHelper.colorToScalar(Color.RED),
           3);
 
+      // box edges perpendicular to tag
       for (int i = 0; i < bottomPoints.size(); i++) {
         Imgproc.line(
             in.getLeft(),
@@ -180,6 +188,7 @@ protected Void process(Pair<Mat, List<TrackedTarget>> in) {
             ColorHelper.colorToScalar(Color.blue),
             3);
       }
+      // box edges parallel to tag
       for (int i = 0; i < topPoints.size(); i++) {
         Imgproc.line(
             in.getLeft(),
@@ -265,18 +274,6 @@ private void divideMat2f(MatOfPoint2f src, MatOfPoint dst) {
     dst.fromArray(pointArray);
   }
 
-  private void divideMat2f(MatOfPoint2f src, MatOfPoint2f dst) {
-    var hull = src.toArray();
-    var pointArray = new Point[hull.length];
-    for (int i = 0; i < hull.length; i++) {
-      var hullAtI = hull[i];
-      pointArray[i] =
-          new Point(
-              hullAtI.x / (double) params.divisor.value, hullAtI.y / (double) params.divisor.value);
-    }
-    dst.fromArray(pointArray);
-  }
-
   /** Scale a given point list by the current frame divisor. the point list is mutated! */
   private void dividePointList(List<Point> points) {
     for (var p : points) {
diff --git a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/SolvePNPPipe.java b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/SolvePNPPipe.java
index db773f6375..6a887d3a18 100644
--- a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/SolvePNPPipe.java
+++ b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/SolvePNPPipe.java
@@ -18,7 +18,6 @@ package org.photonvision.vision.pipe.impl;
 
 import edu.wpi.first.math.VecBuilder;
-import edu.wpi.first.math.geometry.Pose3d;
 import edu.wpi.first.math.geometry.Rotation3d;
 import edu.wpi.first.math.geometry.Transform3d;
 import edu.wpi.first.math.geometry.Translation3d;
@@ -27,7 +26,6 @@ import org.opencv.core.Core;
 import org.opencv.core.Mat;
 import org.opencv.core.MatOfPoint2f;
-import org.opencv.core.Scalar;
 import org.photonvision.common.logging.LogGroup;
 import org.photonvision.common.logging.Logger;
 import org.photonvision.common.util.math.MathUtils;
@@ -99,33 +97,12 @@ private void calculateTargetPose(TrackedTarget target) {
             VecBuilder.fill(rVec.get(0, 0)[0], rVec.get(1, 0)[0], rVec.get(2, 0)[0]),
             Core.norm(rVec));
 
-    Pose3d targetPose = MathUtils.convertOpenCVtoPhotonPose(new Transform3d(translation, rotation));
-    target.setBestCameraToTarget3d(
-        new Transform3d(targetPose.getTranslation(), targetPose.getRotation()));
+    Transform3d camToTarget =
+        MathUtils.convertOpenCVtoPhotonTransform(new Transform3d(translation, rotation));
+    target.setBestCameraToTarget3d(camToTarget);
     target.setAltCameraToTarget3d(new Transform3d());
   }
 
-  Mat rotationMatrix = new Mat();
-  Mat inverseRotationMatrix = new Mat();
-  Mat pzeroWorld = new Mat();
-  Mat kMat = new Mat();
-  Mat scaledTvec;
-
-  /**
-   * Element-wise scale a matrix by a given factor
-   *
-   * @param src the source matrix
-   * @param factor by how much to scale each element
-   * @return the scaled matrix
-   */
-  @SuppressWarnings("SameParameterValue")
-  private static Mat matScale(Mat src,
double factor) { - Mat dst = new Mat(src.rows(), src.cols(), src.type()); - Scalar s = new Scalar(factor); - Core.multiply(src, s, dst); - return dst; - } - public static class SolvePNPPipeParams { private final CameraCalibrationCoefficients cameraCoefficients; private final TargetModel targetModel; diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java index af1c5145d4..c7ffe1a83c 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java @@ -146,8 +146,10 @@ protected CVPipelineResult process(Frame frame, AprilTagPipelineSettings setting new TargetCalculationParameters( false, null, null, null, null, frameStaticProperties)); - var correctedBestPose = MathUtils.convertOpenCVtoPhotonPose(target.getBestCameraToTarget3d()); - var correctedAltPose = MathUtils.convertOpenCVtoPhotonPose(target.getAltCameraToTarget3d()); + var correctedBestPose = + MathUtils.convertOpenCVtoPhotonTransform(target.getBestCameraToTarget3d()); + var correctedAltPose = + MathUtils.convertOpenCVtoPhotonTransform(target.getAltCameraToTarget3d()); target.setBestCameraToTarget3d( new Transform3d(correctedBestPose.getTranslation(), correctedBestPose.getRotation())); diff --git a/photon-core/src/main/java/org/photonvision/vision/target/TargetModel.java b/photon-core/src/main/java/org/photonvision/vision/target/TargetModel.java index ea06638af8..198cbd5139 100644 --- a/photon-core/src/main/java/org/photonvision/vision/target/TargetModel.java +++ b/photon-core/src/main/java/org/photonvision/vision/target/TargetModel.java @@ -25,37 +25,50 @@ import org.opencv.core.MatOfPoint3f; import org.opencv.core.Point3; import org.photonvision.vision.opencv.Releasable; - +import org.photonvision.vision.pipe.impl.CornerDetectionPipe; +import org.photonvision.vision.pipe.impl.SolvePNPPipe; + +/** + * A model representing the vertices of targets with known shapes. The vertices are in the EDN + * coordinate system. When creating a TargetModel, the vertices must be supplied in a certain order + * to ensure correct correspondence with corners detected in 2D for use with SolvePNP. For planar + * targets, we expect the target's Z-axis to point towards the camera. + * + *

+ * <p>{@link SolvePNPPipe} expects 3d object points to correspond to the {@link CornerDetectionPipe}
+ * implementation. The 2d corner detection finds the 4 extreme corners (bottom-left, bottom-right,
+ * top-right, top-left). To match our expectations, this means the model vertices would look like:
+ *
+ * <ul>
+ *   <li>(+x, +y, 0): bottom-left
+ *   <li>(-x, +y, 0): bottom-right
+ *   <li>(-x, -y, 0): top-right
+ *   <li>(+x, -y, 0): top-left
+ * </ul>
+ *
+ * <p>
AprilTag models are currently only used for drawing on the output stream. + */ public enum TargetModel implements Releasable { - k2020HighGoalOuter( - List.of( - new Point3(Units.inchesToMeters(-19.625), 0, 0), - new Point3(Units.inchesToMeters(-9.819867), Units.inchesToMeters(-17), 0), - new Point3(Units.inchesToMeters(9.819867), Units.inchesToMeters(-17), 0), - new Point3(Units.inchesToMeters(19.625), 0, 0)), - Units.inchesToMeters(12)), - k2020HighGoalInner( + k2016HighGoal( List.of( - new Point3(Units.inchesToMeters(-19.625), 0, Units.inchesToMeters(2d * 12d + 5.25)), - new Point3( - Units.inchesToMeters(-9.819867), - Units.inchesToMeters(-17), - Units.inchesToMeters(2d * 12d + 5.25)), - new Point3( - Units.inchesToMeters(9.819867), - Units.inchesToMeters(-17), - Units.inchesToMeters(2d * 12d + 5.25)), - new Point3(Units.inchesToMeters(19.625), 0, Units.inchesToMeters(2d * 12d + 5.25))), - Units.inchesToMeters(12)), - + new Point3(Units.inchesToMeters(10), Units.inchesToMeters(6), 0), + new Point3(Units.inchesToMeters(-10), Units.inchesToMeters(6), 0), + new Point3(Units.inchesToMeters(-10), Units.inchesToMeters(-6), 0), + new Point3(Units.inchesToMeters(10), Units.inchesToMeters(-6), 0)), + Units.inchesToMeters(6)), k2019DualTarget( List.of( - new Point3(Units.inchesToMeters(-5.936), Units.inchesToMeters(2.662), 0), - new Point3(Units.inchesToMeters(-7.313), Units.inchesToMeters(-2.662), 0), - new Point3(Units.inchesToMeters(7.313), Units.inchesToMeters(-2.662), 0), - new Point3(Units.inchesToMeters(5.936), Units.inchesToMeters(2.662), 0)), + new Point3(Units.inchesToMeters(7.313), Units.inchesToMeters(2.662), 0), + new Point3(Units.inchesToMeters(-7.313), Units.inchesToMeters(2.662), 0), + new Point3(Units.inchesToMeters(-5.936), Units.inchesToMeters(-2.662), 0), + new Point3(Units.inchesToMeters(5.936), Units.inchesToMeters(-2.662), 0)), 0.1), - + k2020HighGoalOuter( + List.of( + new Point3(Units.inchesToMeters(9.819867), Units.inchesToMeters(8.5), 0), + new Point3(Units.inchesToMeters(-9.819867), Units.inchesToMeters(8.5), 0), + new Point3(Units.inchesToMeters(-19.625), Units.inchesToMeters(-8.5), 0), + new Point3(Units.inchesToMeters(19.625), Units.inchesToMeters(-8.5), 0)), + Units.inchesToMeters(12)), kCircularPowerCell7in( List.of( new Point3( @@ -94,36 +107,26 @@ public enum TargetModel implements Releasable { -Units.inchesToMeters(9.5) / 2, -Units.inchesToMeters(9.5) / 2)), 0), - k2016HighGoal( - List.of( - new Point3(Units.inchesToMeters(-10), Units.inchesToMeters(12), 0), - new Point3(Units.inchesToMeters(-10), Units.inchesToMeters(0), 0), - new Point3(Units.inchesToMeters(10), Units.inchesToMeters(0), 0), - new Point3(Units.inchesToMeters(10), Units.inchesToMeters(12), 0)), - Units.inchesToMeters(6)), - k200mmAprilTag( // Nominal edge length of 200 mm includes the white border, but solvePNP corners - // do not + k200mmAprilTag( // Corners of the tag's inner black square (excluding white border) List.of( - new Point3(-Units.inchesToMeters(3.25), Units.inchesToMeters(3.25), 0), new Point3(Units.inchesToMeters(3.25), Units.inchesToMeters(3.25), 0), - new Point3(Units.inchesToMeters(3.25), -Units.inchesToMeters(3.25), 0), - new Point3(-Units.inchesToMeters(3.25), -Units.inchesToMeters(3.25), 0)), + new Point3(-Units.inchesToMeters(3.25), Units.inchesToMeters(3.25), 0), + new Point3(-Units.inchesToMeters(3.25), -Units.inchesToMeters(3.25), 0), + new Point3(Units.inchesToMeters(3.25), -Units.inchesToMeters(3.25), 0)), Units.inchesToMeters(3.25 * 2)), - kAruco6in_16h5( // Nominal 
edge length of 200 mm includes the white border, but solvePNP corners
-  // do not
+  kAruco6in_16h5( // Corners of the tag's inner black square (excluding white border)
       List.of(
           new Point3(Units.inchesToMeters(3), Units.inchesToMeters(3), 0),
           new Point3(Units.inchesToMeters(3), -Units.inchesToMeters(3), 0),
           new Point3(-Units.inchesToMeters(3), -Units.inchesToMeters(3), 0),
-          new Point3(Units.inchesToMeters(3), -Units.inchesToMeters(3), 0)),
+          new Point3(-Units.inchesToMeters(3), Units.inchesToMeters(3), 0)),
       Units.inchesToMeters(3 * 2)),
-  k6in_16h5( // Nominal edge length of 200 mm includes the white border, but solvePNP corners
-  // do not
+  k6in_16h5( // Corners of the tag's inner black square (excluding white border)
       List.of(
-          new Point3(-Units.inchesToMeters(3), Units.inchesToMeters(3), 0),
           new Point3(Units.inchesToMeters(3), Units.inchesToMeters(3), 0),
-          new Point3(Units.inchesToMeters(3), -Units.inchesToMeters(3), 0),
-          new Point3(-Units.inchesToMeters(3), -Units.inchesToMeters(3), 0)),
+          new Point3(-Units.inchesToMeters(3), Units.inchesToMeters(3), 0),
+          new Point3(-Units.inchesToMeters(3), -Units.inchesToMeters(3), 0),
+          new Point3(Units.inchesToMeters(3), -Units.inchesToMeters(3), 0)),
       Units.inchesToMeters(3 * 2));
 
   @JsonIgnore private MatOfPoint3f realWorldTargetCoordinates;
diff --git a/photon-core/src/test/java/org/photonvision/common/util/CoordinateConversionTest.java b/photon-core/src/test/java/org/photonvision/common/util/CoordinateConversionTest.java
new file mode 100644
index 0000000000..a452f517f5
--- /dev/null
+++ b/photon-core/src/test/java/org/photonvision/common/util/CoordinateConversionTest.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) Photon Vision.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+package org.photonvision.common.util;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import edu.wpi.first.math.VecBuilder;
+import edu.wpi.first.math.geometry.Rotation3d;
+import edu.wpi.first.math.geometry.Transform3d;
+import edu.wpi.first.math.geometry.Translation3d;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.photonvision.common.util.math.MathUtils;
+
+public class CoordinateConversionTest {
+  @BeforeAll
+  public static void Init() {
+    TestUtils.loadLibraries();
+  }
+
+  @Test
+  public void testAprilTagToOpenCV() {
+    // AprilTag and OpenCV both use the EDN coordinate system. AprilTag, however, assumes the tag's
+    // z-axis points away from the camera while we expect it to point towards the camera.
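+    // Concretely: the correction in convertApriltagtoOpenCV is a 180-degree rotation about the
+    // tag's y-axis, so the converted x- and z-axes should be the AprilTag axes negated, while
+    // the y-axis and the translation are left unchanged. That is what the assertions below check.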
+    var apriltag =
+        new Transform3d(
+            new Translation3d(1, 2, 3),
+            new Rotation3d(Math.toRadians(5), Math.toRadians(10), Math.toRadians(15)));
+    var opencv = MathUtils.convertApriltagtoOpenCV(apriltag);
+    final var expectedTrl = new Translation3d(1, 2, 3);
+    assertEquals(
+        expectedTrl, opencv.getTranslation(), "AprilTag to OpenCV translation conversion failed");
+    var apriltagXaxis = new Translation3d(1, 0, 0).rotateBy(apriltag.getRotation());
+    var apriltagYaxis = new Translation3d(0, 1, 0).rotateBy(apriltag.getRotation());
+    var apriltagZaxis = new Translation3d(0, 0, 1).rotateBy(apriltag.getRotation());
+    var opencvXaxis = new Translation3d(1, 0, 0).rotateBy(opencv.getRotation());
+    var opencvYaxis = new Translation3d(0, 1, 0).rotateBy(opencv.getRotation());
+    var opencvZaxis = new Translation3d(0, 0, 1).rotateBy(opencv.getRotation());
+    assertEquals(
+        apriltagXaxis.unaryMinus(),
+        opencvXaxis,
+        "AprilTag to OpenCV rotation conversion failed (X-axis)");
+    assertEquals(
+        apriltagYaxis, opencvYaxis, "AprilTag to OpenCV rotation conversion failed (Y-axis)");
+    assertEquals(
+        apriltagZaxis.unaryMinus(),
+        opencvZaxis,
+        "AprilTag to OpenCV rotation conversion failed (Z-axis)");
+  }
+
+  @Test
+  public void testOpenCVToPhoton() {
+    // OpenCV uses the EDN coordinate system while wpilib is in NWU.
+    var opencv =
+        new Transform3d(
+            new Translation3d(1, 2, 3), new Rotation3d(VecBuilder.fill(1, 2, 3), Math.PI / 8));
+    var wpilib = MathUtils.convertOpenCVtoPhotonTransform(opencv);
+    final var expectedTrl = new Translation3d(3, -1, -2);
+    assertEquals(
+        expectedTrl, wpilib.getTranslation(), "OpenCV to WPILib translation conversion failed");
+    var expectedRot = new Rotation3d(VecBuilder.fill(3, -1, -2), Math.PI / 8);
+    assertEquals(expectedRot, wpilib.getRotation(), "OpenCV to WPILib rotation conversion failed");
+  }
+}
diff --git a/photon-core/src/test/java/org/photonvision/vision/pipeline/AprilTagTest.java b/photon-core/src/test/java/org/photonvision/vision/pipeline/AprilTagTest.java
index 1ece69fc55..fe1b7b9d9c 100644
--- a/photon-core/src/test/java/org/photonvision/vision/pipeline/AprilTagTest.java
+++ b/photon-core/src/test/java/org/photonvision/vision/pipeline/AprilTagTest.java
@@ -75,20 +75,19 @@ public void testApriltagFacingCamera() {
     // these numbers are not *accurate*, but they are known and expected
     var pose = pipelineResult.targets.get(0).getBestCameraToTarget3d();
     Assertions.assertEquals(2, pose.getTranslation().getX(), 0.2);
-    Assertions.assertEquals(0.0, pose.getTranslation().getY(), 0.2);
+    Assertions.assertEquals(0.1, pose.getTranslation().getY(), 0.2);
     Assertions.assertEquals(0.0, pose.getTranslation().getZ(), 0.2);
 
-    var objX = new Translation3d(1, 0, 0).rotateBy(pose.getRotation()).getY();
-    var objY = new Translation3d(0, 1, 0).rotateBy(pose.getRotation()).getZ();
-    var objZ = new Translation3d(0, 0, 1).rotateBy(pose.getRotation()).getX();
+    // We expect the object axes to be in NWU, with the x-axis coming out of the tag
+    // This visible tag is facing the camera nearly head-on, so in world space:
 
-    // We expect the object X to be forward, or -X in world space
+    // The object's X axis should be (-1, 0, 0)
     Assertions.assertEquals(
         -1, new Translation3d(1, 0, 0).rotateBy(pose.getRotation()).getX(), 0.1);
-    // We expect the object Y axis to be right, or negative-Y in world space
+    // The object's Y axis should be (0, -1, 0)
     Assertions.assertEquals(
         -1, new Translation3d(0, 1, 0).rotateBy(pose.getRotation()).getY(), 0.1);
-    // We expect the object Z axis to be up, or +Z in
world space
+    // The object's Z axis should be (0, 0, 1)
     Assertions.assertEquals(1, new Translation3d(0, 0, 1).rotateBy(pose.getRotation()).getZ(), 0.1);
   }
diff --git a/photon-core/src/test/java/org/photonvision/vision/pipeline/SolvePNPTest.java b/photon-core/src/test/java/org/photonvision/vision/pipeline/SolvePNPTest.java
index 02852308b9..03bcc2535d 100644
--- a/photon-core/src/test/java/org/photonvision/vision/pipeline/SolvePNPTest.java
+++ b/photon-core/src/test/java/org/photonvision/vision/pipeline/SolvePNPTest.java
@@ -19,11 +19,12 @@
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
+import edu.wpi.first.math.geometry.Rotation3d;
 import edu.wpi.first.math.geometry.Translation3d;
 import edu.wpi.first.math.util.Units;
 import java.util.stream.Collectors;
-import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.photonvision.common.util.TestUtils;
@@ -115,28 +116,31 @@ public void test2019() {
             pipeline.getSettings().hueInverted);
     frameProvider.requestHsvSettings(hsvParams);
 
-    CVPipelineResult pipelineResult;
-
-    pipelineResult = pipeline.run(frameProvider.get(), QuirkyCamera.DefaultCamera);
+    CVPipelineResult pipelineResult = pipeline.run(frameProvider.get(), QuirkyCamera.DefaultCamera);
     printTestResults(pipelineResult);
 
-    // these numbers are not *accurate*, but they are known and expected
-    var pose = pipelineResult.targets.get(0).getBestCameraToTarget3d();
-    Assertions.assertEquals(1.1, pose.getTranslation().getX(), 0.05);
-    Assertions.assertEquals(0.0, pose.getTranslation().getY(), 0.05);
-
-    // We expect the object X to be forward, or -X in world space
-    Assertions.assertEquals(
-        -1, new Translation3d(1, 0, 0).rotateBy(pose.getRotation()).getX(), 0.05);
-    // We expect the object Y axis to be right, or negative-Y in world space
-    Assertions.assertEquals(
-        -1, new Translation3d(0, 1, 0).rotateBy(pose.getRotation()).getY(), 0.05);
-    // We expect the object Z axis to be up, or +Z in world space
-    Assertions.assertEquals(
-        1, new Translation3d(0, 0, 1).rotateBy(pose.getRotation()).getZ(), 0.05);
+    // Draw on input
+    var outputPipe = new OutputStreamPipeline();
+    outputPipe.process(
+        pipelineResult.inputAndOutputFrame, pipeline.getSettings(), pipelineResult.targets);
 
     TestUtils.showImage(
-        pipelineResult.inputAndOutputFrame.colorImage.getMat(), "Pipeline output", 999999);
+        pipelineResult.inputAndOutputFrame.processedImage.getMat(), "Pipeline output", 999999);
+
+    var pose = pipelineResult.targets.get(0).getBestCameraToTarget3d();
+    // these numbers are not *accurate*, but they are known and expected
+    var expectedTrl = new Translation3d(1.1, -0.05, -0.05);
+    assertTrue(
+        expectedTrl.getDistance(pose.getTranslation()) < 0.05,
+        "SolvePNP translation estimation failed");
+    // We expect the object axes to be in NWU, with the x-axis coming out of the tag
+    // This target is facing the camera nearly head-on, so in world space:
+    // The object's X axis should be (-1, 0, 0)
+    assertEquals(-1, new Translation3d(1, 0, 0).rotateBy(pose.getRotation()).getX(), 0.05);
+    // The object's Y axis should be (0, -1, 0)
+    assertEquals(-1, new Translation3d(0, 1, 0).rotateBy(pose.getRotation()).getY(), 0.05);
+    // The object's Z axis should be (0, 0, 1)
+    assertEquals(1, new Translation3d(0, 0, 1).rotateBy(pose.getRotation()).getZ(), 0.05);
   }
 
   @Test
@@ -175,15 +179,27 @@ public void test2020() {
outputPipe.process( pipelineResult.inputAndOutputFrame, pipeline.getSettings(), pipelineResult.targets); - // these numbers are not *accurate*, but they are known and expected - var pose = pipelineResult.targets.get(0).getBestCameraToTarget3d(); - Assertions.assertEquals(Units.inchesToMeters(240.26), pose.getTranslation().getX(), 0.05); - Assertions.assertEquals(Units.inchesToMeters(35), pose.getTranslation().getY(), 0.05); - // Z rotation should be mostly facing us - Assertions.assertEquals(Units.degreesToRadians(-140), pose.getRotation().getZ(), 1); - TestUtils.showImage( - pipelineResult.inputAndOutputFrame.colorImage.getMat(), "Pipeline output", 999999); + pipelineResult.inputAndOutputFrame.processedImage.getMat(), "Pipeline output", 999999); + + var pose = pipelineResult.targets.get(0).getBestCameraToTarget3d(); + // these numbers are not *accurate*, but they are known and expected + var expectedTrl = + new Translation3d( + Units.inchesToMeters(236), Units.inchesToMeters(36), Units.inchesToMeters(-53)); + assertTrue( + expectedTrl.getDistance(pose.getTranslation()) < 0.05, + "SolvePNP translation estimation failed"); + // We expect the object axes to be in NWU, with the x-axis coming out of the tag + // Rotation around Z axis (yaw) should be mostly facing us + var xAxis = new Translation3d(1, 0, 0); + var yAxis = new Translation3d(0, 1, 0); + var zAxis = new Translation3d(0, 0, 1); + var expectedRot = + new Rotation3d(Math.toRadians(-20), Math.toRadians(-20), Math.toRadians(-120)); + assertTrue(xAxis.rotateBy(expectedRot).getDistance(xAxis.rotateBy(pose.getRotation())) < 0.1); + assertTrue(yAxis.rotateBy(expectedRot).getDistance(yAxis.rotateBy(pose.getRotation())) < 0.1); + assertTrue(zAxis.rotateBy(expectedRot).getDistance(zAxis.rotateBy(pose.getRotation())) < 0.1); } private static void continuouslyRunPipeline(Frame frame, ReflectivePipelineSettings settings) {
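A closing note for reviewers: the EDN-to-NWU mapping this diff standardizes on is easy to sanity-check against WPILib's CoordinateSystem helper. A minimal standalone sketch (illustrative, not part of the PR; it mirrors testOpenCVToPhoton above):

```java
import edu.wpi.first.math.geometry.CoordinateSystem;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.geometry.Translation3d;

class EdnToNwuSketch {
  public static void main(String[] args) {
    // EDN basis vectors expressed in NWU: E (+x) -> -y, D (+y) -> -z, N (+z) -> +x,
    // so an EDN translation of (1, 2, 3) should come out as (3, -1, -2) in NWU.
    Transform3d edn = new Transform3d(new Translation3d(1, 2, 3), new Rotation3d());
    Transform3d nwu =
        CoordinateSystem.convert(edn, CoordinateSystem.EDN(), CoordinateSystem.NWU());
    System.out.println(nwu.getTranslation()); // expect (3, -1, -2)
  }
}
```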