Allowing External Angle Sources in Pose Estimation #1309
base: master
Changes from all commits
58983a1
6466001
f4893c1
89889a8
b286515
1963205
ea6496f
5839bbc
e8f6378
c0cd623
647d9e6
16c3279
8258411
420e8df
16bd701
5cc67dc
b96a5d1
PhotonPoseEstimator.java
@@ -27,13 +27,13 @@
import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.hal.FRCNetComm.tResourceType;
import edu.wpi.first.hal.HAL;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Pair;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.geometry.Translation3d;
import edu.wpi.first.math.controller.PIDController;
import edu.wpi.first.math.geometry.*;
import edu.wpi.first.math.interpolation.Interpolator;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import edu.wpi.first.math.numbers.N8;
@@ -43,10 +43,12 @@
import java.util.List;
import java.util.Optional;
import java.util.Set;
import org.opencv.core.Point;
import org.photonvision.estimation.TargetModel;
import org.photonvision.estimation.VisionEstimation;
import org.photonvision.targeting.PhotonPipelineResult;
import org.photonvision.targeting.PhotonTrackedTarget;
import org.photonvision.targeting.TargetCorner;

/**
 * The PhotonPoseEstimator class filters or combines readings from all the AprilTags visible at a
@@ -83,7 +85,14 @@ public enum PoseStrategy {
   * Use all visible tags to compute a single pose estimate. This runs on the RoboRIO, and can
   * take a lot of time.
   */
  MULTI_TAG_PNP_ON_RIO
  MULTI_TAG_PNP_ON_RIO,

  /**
   * Use the best visible tag to compute a single pose estimate. This runs on the RoboRIO in order
   * to access the robot's yaw heading, and MUST have setReferencePose called every frame so
   * heading data is up-to-date.
   */
  IMU_SOLVE_BEST_TAG
}

private AprilTagFieldLayout fieldTags;
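For context, a minimal usage sketch of the new strategy. The field layout, camera, transform, and drivetrain accessors (kFieldLayout, camera, kRobotToCamera, drivetrain) are placeholders and not part of this PR; the constructor shown is the existing photonlib one.

```java
// Hypothetical setup; all names besides the photonlib types are placeholders.
PhotonPoseEstimator estimator =
        new PhotonPoseEstimator(
                kFieldLayout,
                PhotonPoseEstimator.PoseStrategy.IMU_SOLVE_BEST_TAG,
                camera,
                kRobotToCamera);

// Per the javadoc above, the reference pose must be refreshed every frame so the
// heading data the strategy relies on stays current.
estimator.setReferencePose(drivetrain.getPose());
estimator
        .update()
        .ifPresent(
                est ->
                        drivetrain.addVisionMeasurement(
                                est.estimatedPose.toPose2d(), est.timestampSeconds));
```

Note that the implementation below actually samples the heading from the PhotonUtils heading buffer (getHeadingSample), so heading samples also need to be registered every loop; see the sketch after the PhotonUtils changes.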
@@ -97,6 +106,7 @@ public enum PoseStrategy {
  protected double poseCacheTimestampSeconds = -1;
  private final Set<Integer> reportedErrors = new HashSet<>();

  /**
   * Create a new PhotonPoseEstimator.
   *
@@ -259,6 +269,7 @@ public void setLastPose(Pose2d lastPose) {
    setLastPose(new Pose3d(lastPose));
  }

  /**
   * @return The current transform from the center of the robot to the camera mount position
   */
@@ -380,6 +391,9 @@ private Optional<EstimatedRobotPose> update(
      case MULTI_TAG_PNP_ON_COPROCESSOR:
        estimatedPose = multiTagOnCoprocStrategy(cameraResult);
        break;
      case IMU_SOLVE_BEST_TAG:
        estimatedPose = imuTagOnRioStrategy(cameraResult, cameraMatrix);
        break;
      default:
        DriverStation.reportError(
            "[PhotonPoseEstimator] Unknown Position Estimation Strategy!", false);
@@ -413,6 +427,71 @@ private Optional<EstimatedRobotPose> multiTagOnCoprocStrategy(PhotonPipelineResu
    }
  }

  private Optional<EstimatedRobotPose> imuTagOnRioStrategy(
      PhotonPipelineResult result, Optional<Matrix<N3, N3>> cameraMatrixOpt) {
    if (result.hasTargets() && cameraMatrixOpt.isPresent()) {
      PhotonTrackedTarget target = result.getBestTarget();

      List<TargetCorner> targetCorners = target.getDetectedCorners();
      double sumX = 0.0;
      double sumY = 0.0;
      for (TargetCorner t : targetCorners) {
        sumX += t.x;
        sumY += t.y;
      }

      Point tagCenter = new Point(sumX / 4, sumY / 4);

      Rotation3d tagAngle = PhotonUtils.correctPixelRot(tagCenter, cameraMatrixOpt.get());

      int tagID = target.getFiducialId();
      Optional<Pose3d> tagPose = fieldTags.getTagPose(tagID);

      if (tagPose.isEmpty()) {
        reportFiducialPoseError(tagID);
        return Optional.empty();
      }

      // calculate distance to the tag
      double hypot =
          -PhotonUtils.calculateDistanceToTargetMeters(
              robotToCamera.getZ(), tagPose.get().getZ(), robotToCamera.getY(), tagAngle.getY());
Comment on lines +456 to +458
Reviewer: Shouldn't …
Author: I think you're right, that looks wrong. I think I may have fixed that locally during testing awhile back, but I'll make sure it gets fixed when I rewrite to use the PhotonUtils function.
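For reference, a minimal sketch of the relationship that PhotonUtils.calculateDistanceToTargetMeters(cameraHeightMeters, targetHeightMeters, cameraPitchRadians, targetPitchRadians) evaluates; the concrete numbers below are made up for illustration.

```java
// Standard camera-pitch distance model:
//   distance = (targetHeight - cameraHeight) / tan(cameraPitch + targetPitch)
// Illustrative numbers only: camera 0.5 m up pitched 15 deg, tag 1.2 m up seen 10 deg above center.
double cameraHeightMeters = 0.5;
double targetHeightMeters = 1.2;
double cameraPitchRadians = Math.toRadians(15.0);
double targetPitchRadians = Math.toRadians(10.0);

double distanceMeters =
        (targetHeightMeters - cameraHeightMeters)
                / Math.tan(cameraPitchRadians + targetPitchRadians); // roughly 1.5 m
```

Read against that signature, the third argument would normally be the camera's mounting pitch (robotToCamera.getRotation().getY()) rather than the translation component robotToCamera.getY(), which may be what the truncated comment above is pointing at; the author's reply suggests the same.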
||
|
||
// calculating the translation of the tag from the robot center on the floor plane | ||
double robotX = Math.sin(tagAngle.getZ() + robotToCamera.getRotation().getZ()) * hypot; | ||
double robotY = -Math.cos(tagAngle.getZ() + robotToCamera.getRotation().getZ()) * hypot; | ||
|
||
Translation2d tagToCamera = new Translation2d(robotX, robotY); | ||
|
||
Translation2d robotToTag = tagToCamera.plus(robotToCamera.getTranslation().toTranslation2d()); | ||
|
||
Optional<Rotation2d> headingState = PhotonUtils.getHeadingSample(result.getTimestampSeconds()); | ||
if(headingState.isEmpty()) { | ||
return Optional.empty(); | ||
} | ||
|
||
Rotation2d robotHeading = headingState.get(); | ||
|
||
// rotating the robot-relative tag position by the IMU yaw to make it field-relative | ||
Translation2d fixedTrans = robotToTag.rotateBy(robotHeading); | ||
|
||
Translation2d finalTranslation = | ||
tagPose.get().getTranslation().toTranslation2d().plus(fixedTrans); | ||
|
||
Pose3d robotPosition = | ||
new Pose3d(new Pose2d(finalTranslation, robotHeading)); | ||
|
||
return Optional.of( | ||
new EstimatedRobotPose( | ||
robotPosition, | ||
result.getTimestampSeconds(), | ||
result.getTargets(), | ||
PoseStrategy.IMU_SOLVE_BEST_TAG)); | ||
} else { | ||
return Optional.empty(); | ||
} | ||
} | ||
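To make the frame composition above concrete, a small numeric sketch that follows the same steps; every value is invented for illustration, and the camera is assumed to have no yaw offset so robotToCamera's Z rotation drops out.

```java
// Illustrative values only: corrected tag yaw of 10 degrees, 2.0 m planar distance,
// camera mounted 0.3 m ahead of robot center, gyro heading 45 degrees, tag at (8.0, 4.0).
double hypot = 2.0;
double tagYawRad = Math.toRadians(10.0);
Rotation2d robotHeading = Rotation2d.fromDegrees(45.0);
Translation2d cameraOffset = new Translation2d(0.3, 0.0);
Translation2d tagFieldPosition = new Translation2d(8.0, 4.0);

// Same steps as imuTagOnRioStrategy: camera-relative planar offset from the yaw and
// distance, add the camera mount offset, rotate into the field frame by the IMU
// heading, then anchor on the tag's known field position.
Translation2d tagToCamera =
        new Translation2d(Math.sin(tagYawRad) * hypot, -Math.cos(tagYawRad) * hypot);
Translation2d robotToTag = tagToCamera.plus(cameraOffset);
Translation2d robotOnField = tagFieldPosition.plus(robotToTag.rotateBy(robotHeading));
```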
|
||
private Optional<EstimatedRobotPose> multiTagOnRioStrategy( | ||
PhotonPipelineResult result, | ||
Optional<Matrix<N3, N3>> cameraMatrixOpt, | ||
|
PhotonUtils.java
@@ -22,9 +22,16 @@
 * SOFTWARE.
 */

package org.photonvision;

import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.geometry.*;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.numbers.N3;
import org.opencv.core.Point;

import java.util.Optional;

public final class PhotonUtils {
  private PhotonUtils() {
@@ -206,4 +213,50 @@ public static Rotation2d getYawToPose(Pose2d robotPose, Pose2d targetPose) {
  public static double getDistanceToPose(Pose2d robotPose, Pose2d targetPose) {
    return robotPose.getTranslation().getDistance(targetPose.getTranslation());
  }

  /**
   * Corrects a given pixel for perspective distortion
   *
   * @param pixel pixel to be corrected
   * @param camIntrinsics the camera's intrinsic (calibration) matrix
   * @return the yaw and pitch of the pixel as a Rotation3d
   */
  public static Rotation3d correctPixelRot(Point pixel, Matrix<N3, N3> camIntrinsics) {
Reviewer: Maybe just a merge commit meme, but getCorrectedPixelRot already exists? Let's try to implement this algorithm in only one place.
    double fx = camIntrinsics.get(0, 0);
    double cx = camIntrinsics.get(0, 2);
    double xOffset = cx - pixel.x;

    double fy = camIntrinsics.get(1, 1);
    double cy = camIntrinsics.get(1, 2);
    double yOffset = cy - pixel.y;

    // calculate yaw normally
    var yaw = new Rotation2d(fx, xOffset);
    // correct pitch based on yaw
    var pitch = new Rotation2d(fy / Math.cos(Math.atan(xOffset / fx)), -yOffset);

    return new Rotation3d(0, pitch.getRadians(), yaw.getRadians());
  }
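A quick numeric illustration of the yaw term above, using made-up intrinsics rather than any real calibration; the pitch term follows the same idea, with the adjacent length stretched by 1 / cos of the horizontal angle.

```java
// Made-up intrinsics for illustration (not from a real camera calibration):
double fx = 600.0;               // focal length, pixels
double cx = 320.0;               // principal point x, pixels
double pixelX = 400.0;           // detected tag center x, pixels
double xOffset = cx - pixelX;    // -80 px: the tag is right of the image center

// Same construction as the method above: Rotation2d(fx, xOffset) is the angle of
// the point (fx, xOffset), i.e. atan2(-80, 600), roughly -7.6 degrees of yaw here.
Rotation2d yaw = new Rotation2d(fx, xOffset);
```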
  private static final TimeInterpolatableBuffer<Rotation2d> headingBuffer =
      TimeInterpolatableBuffer.createBuffer(1.0);

Comment on lines +243 to +245
Reviewer: PhotonUtils should be stateless.
  private static final TimeInterpolatableBuffer<Double> headingVelocityBuffer =
      TimeInterpolatableBuffer.createDoubleBuffer(1.0);

  public static void registerHeadingState(Rotation2d robotAngle, double velocity, double timestamp) {
    headingBuffer.addSample(timestamp, robotAngle);
    headingVelocityBuffer.addSample(timestamp, velocity);
  }

  public static Optional<Rotation2d> getHeadingSample(double timestamp) {
    return headingBuffer.getSample(timestamp);
  }

  private static Optional<Double> getHeadingVelocitySample(double timestamp) {
    return headingVelocityBuffer.getSample(timestamp);
  }
}
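A sketch of how a robot program might feed this buffer, assuming a subsystem periodic loop, a WPILib gyro object, and Timer from WPILib, none of which are part of this PR; the velocity units are not specified in the PR, so rad/s is assumed here. Without samples near the frame timestamp, getHeadingSample returns empty and the IMU strategy bails out.

```java
// Hypothetical call site inside a drivetrain subsystem's periodic() method.
PhotonUtils.registerHeadingState(
        gyro.getRotation2d(),              // current robot yaw
        Math.toRadians(gyro.getRate()),    // yaw rate, converted from deg/s to rad/s
        Timer.getFPGATimestamp());         // same timebase as PhotonVision result timestamps
```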
Reviewer: Is there a difference between all the logic here and this PhotonUtils function? If not, why not use it?

Author: I don't think there are any major differences, so I'm not entirely sure why I decided to write it on my own. The PhotonUtils function seems to do what I want, so I'll rewrite imuTagOnRioStrategy to use it. Thank you for pointing that out; I'm not sure I realized it was there.

Reviewer: One thing that's weird to me about that utils function is how it works when the camera yaw isn't the gyro yaw. The only place where the robotToCamera yaw is used is at the end, but the only thing that'll do is change our angle. The field position would still be wrong (from the transform here).
Maybe that transform needs to also take the robotToCamera yaw into account? I may be missing something.
Looking more, a lot of the logic seems very weird to me. Perhaps your way is better.
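A rough sketch of the concern raised here, with placeholder names (tagInCameraFrame, robotFieldTranslation, and gyroYaw are invented for illustration, and robotToCamera stands for the estimator's robot-to-camera transform): a camera yawed relative to the robot shifts where the tag lands in the robot frame, so the camera yaw has to be applied when composing the translation, not only when reporting the final heading.

```java
// Placeholder inputs, not code from either implementation.
Rotation2d cameraYawInRobotFrame = robotToCamera.getRotation().toRotation2d();

// Rotate the camera-relative tag offset into the robot frame before adding the
// camera's mounting translation...
Translation2d tagInRobotFrame =
        tagInCameraFrame
                .rotateBy(cameraYawInRobotFrame)
                .plus(robotToCamera.getTranslation().toTranslation2d());

// ...then rotate the robot-relative offset by the gyro yaw to place it on the field.
Translation2d tagOnField = robotFieldTranslation.plus(tagInRobotFrame.rotateBy(gyroYaw));
```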