Commit

Yay
mcm001 committed Aug 4, 2024
1 parent 33455e0 commit 72e0ef4
Showing 6 changed files with 46 additions and 51 deletions.
7 changes: 3 additions & 4 deletions photon-lib/src/main/java/org/photonvision/PhotonCamera.java
@@ -137,13 +137,12 @@ public PhotonCamera(NetworkTableInstance instance, String cameraName) {
cameraTable
.getRawTopic("rawBytes")
.subscribe(
"rawBytes", new byte[] {},
"rawBytes",
new byte[] {},
PubSubOption.periodic(0.01),
PubSubOption.sendAll(true),
PubSubOption.pollStorage(20));
resultSubscriber =
new PacketSubscriber<>(
rawBytesEntry, PhotonPipelineResult.photonStruct);
resultSubscriber = new PacketSubscriber<>(rawBytesEntry, PhotonPipelineResult.photonStruct);
driverModePublisher = cameraTable.getBooleanTopic("driverModeRequest").publish();
driverModeSubscriber = cameraTable.getBooleanTopic("driverMode").subscribe(false);
inputSaveImgEntry = cameraTable.getIntegerTopic("inputSaveImgCmd").getEntry(0);
@@ -393,8 +393,7 @@ private Optional<EstimatedRobotPose> update(
return estimatedPose;
}

private Optional<EstimatedRobotPose> multiTagOnCoprocStrategy(
PhotonPipelineResult result) {
private Optional<EstimatedRobotPose> multiTagOnCoprocStrategy(PhotonPipelineResult result) {
if (result.getMultiTagResult().isPresent()) {
var best_tf = result.getMultiTagResult().get().estimatedPose.best;
var best =
13 changes: 6 additions & 7 deletions photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp
@@ -122,9 +122,6 @@ PhotonPipelineResult PhotonCamera::GetLatestResult() {
// Prints warning if not connected
VerifyVersion();

// Clear the current packet.
packet.Clear();

// Fill the packet with latest data and populate result.
units::microsecond_t now =
units::microsecond_t(frc::RobotController::GetFPGATime());
@@ -151,8 +148,9 @@ std::vector<PhotonPipelineResult> PhotonCamera::GetAllUnreadResults() {

const auto changes = rawBytesEntry.ReadQueue();

// Create the new result list -- these will be updated in-place
std::vector<PhotonPipelineResult> ret(changes.size());
// Create the new result list
std::vector<PhotonPipelineResult> ret;
ret.reserve(changes.size());

for (size_t i = 0; i < changes.size(); i++) {
const nt::Timestamped<std::vector<uint8_t>>& value = changes[i];
@@ -163,13 +161,14 @@ std::vector<PhotonPipelineResult> PhotonCamera::GetAllUnreadResults() {

// Fill the packet with latest data and populate result.
photon::Packet packet{value.value};
auto result = packet.Unpack<PhotonPipelineResult>();

PhotonPipelineResult& result = ret[i];
packet >> result;
// TODO: NT4 timestamps are still not to be trusted. But it's the best we
// can do until we can make time sync more reliable.
result.SetRecieveTimestamp(units::microsecond_t(value.time) -
result.GetLatency());

ret.push_back(result);
}

return ret;
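With this change, GetAllUnreadResults() builds its return vector by reserving capacity and pushing back results produced by Packet::Unpack, rather than default-constructing the vector and deserializing into it in place. A minimal caller-side sketch of draining the queue follows; the ProcessVisionUpdates wrapper and its placement in a robot loop are illustrative assumptions, not part of this commit.

// Sketch only: assumes the PhotonLib C++ headers are on the include path.
#include <photon/PhotonCamera.h>

void ProcessVisionUpdates(photon::PhotonCamera& camera) {
  // Each call drains every result published since the previous call,
  // oldest first, with receive timestamps already applied.
  for (const auto& result : camera.GetAllUnreadResults()) {
    if (!result.HasTargets()) {
      continue;
    }
    const auto& target = result.GetBestTarget();
    // Feed target.GetYaw(), target.GetFiducialId(), etc. into pose logic.
    (void)target;
  }
}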
@@ -201,7 +201,7 @@ PhotonPipelineResult PhotonCameraSim::Process(
continue;
}

PNPResult pnpSim{};
std::optional<photon::PnpResult> pnpSim{};
if (tgt.fiducialId >= 0 && tgt.GetFieldVertices().size() == 4) {
pnpSim = OpenCVHelp::SolvePNP_Square(
prop.GetIntrinsics(), prop.GetDistCoeffs(),
@@ -226,8 +226,10 @@
-centerRot.Z().convert<units::degrees>().to<double>(),
-centerRot.Y().convert<units::degrees>().to<double>(), areaPercent,
centerRot.X().convert<units::degrees>().to<double>(), tgt.fiducialId,
tgt.objDetClassId, tgt.objDetConf, pnpSim.best, pnpSim.alt,
pnpSim.ambiguity, smallVec, cornersDouble);
tgt.objDetClassId, tgt.objDetConf,
pnpSim ? pnpSim->best : frc::Transform3d{},
pnpSim ? pnpSim->alt : frc::Transform3d{},
pnpSim ? pnpSim->ambiguity : -1, smallVec, cornersDouble);
}

if (videoSimRawEnabled) {
@@ -291,16 +293,14 @@ PhotonPipelineResult PhotonCameraSim::Process(
1, videoSimFrameProcessed)),
cv::LINE_AA);

wpi::SmallVector<std::pair<double, double>, 4> smallVec =
tgt.GetMinAreaRectCorners();
auto smallVec = tgt.GetMinAreaRectCorners();

std::vector<std::pair<float, float>> cornersCopy{};
cornersCopy.reserve(4);

for (const auto& corner : smallVec) {
cornersCopy.emplace_back(
std::make_pair(static_cast<float>(corner.first),
static_cast<float>(corner.second)));
cornersCopy.emplace_back(std::make_pair(
static_cast<float>(corner.x), static_cast<float>(corner.y)));
}

VideoSimUtil::DrawPoly(
@@ -321,12 +321,13 @@ std::vector<frc::AprilTag> visibleLayoutTags =
std::vector<frc::AprilTag> visibleLayoutTags =
VisionEstimation::GetVisibleLayoutTags(detectableTgts, tagLayout);
if (visibleLayoutTags.size() > 1) {
wpi::SmallVector<int16_t, 32> usedIds{};
std::vector<int16_t> usedIds{};
usedIds.reserve(32);
std::transform(visibleLayoutTags.begin(), visibleLayoutTags.end(),
usedIds.begin(),
[](const frc::AprilTag& tag) { return tag.ID; });
std::sort(usedIds.begin(), usedIds.end());
PNPResult pnpResult = VisionEstimation::EstimateCamPosePNP(
auto pnpResult = VisionEstimation::EstimateCamPosePNP(
prop.GetIntrinsics(), prop.GetDistCoeffs(), detectableTgts, tagLayout,
kAprilTag36h11);
multiTagResults = MultiTargetPNPResult{pnpResult, usedIds};
@@ -346,7 +347,7 @@ void PhotonCameraSim::SubmitProcessedFrame(const PhotonPipelineResult& result,
recieveTimestamp);

Packet newPacket{};
newPacket << result;
newPacket.Pack(result);

ts.rawBytesEntry.Set(newPacket.GetData(), recieveTimestamp);

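The simulated target above now treats the per-target PnP solve as a std::optional, substituting identity transforms and a -1 ambiguity when the solve fails, and SubmitProcessedFrame serializes with Packet::Pack instead of operator<<. A small sketch of that optional-with-fallback pattern follows; the UnpackPnp helper, the TargetPnpFields struct, and the header location of photon::PnpResult are assumptions for illustration.

// Sketch of the fallback pattern used in PhotonCameraSim::Process above.
#include <optional>

#include <frc/geometry/Transform3d.h>
#include <photon/targeting/MultiTargetPNPResult.h>  // assumed location of photon::PnpResult

struct TargetPnpFields {
  frc::Transform3d best;
  frc::Transform3d alt;
  double ambiguity;
};

static TargetPnpFields UnpackPnp(const std::optional<photon::PnpResult>& pnp) {
  if (!pnp) {
    // Solve failed: report identity transforms and -1 ambiguity, matching
    // the defaults chosen when constructing the simulated target above.
    return {frc::Transform3d{}, frc::Transform3d{}, -1.0};
  }
  return {pnp->best, pnp->alt, pnp->ambiguity};
}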
@@ -184,7 +184,7 @@ static frc::Rotation3d RVecToRotation(const cv::Mat& rvecInput) {
units::radian_t{data[2]}});
}

[[maybe_unused]] static photon::PnpResult SolvePNP_Square(
[[maybe_unused]] static std::optional<photon::PnpResult> SolvePNP_Square(
const Eigen::Matrix<double, 3, 3>& cameraMatrix,
const Eigen::Matrix<double, 8, 1>& distCoeffs,
std::vector<frc::Translation3d> modelTrls,
@@ -233,6 +233,7 @@ static frc::Rotation3d RVecToRotation(const cv::Mat& rvecInput) {

if (std::isnan(errors[0])) {
fmt::print("SolvePNP_Square failed!\n");
return std::nullopt;
}
if (alt) {
photon::PnpResult result;
@@ -241,18 +242,16 @@ static frc::Rotation3d RVecToRotation(const cv::Mat& rvecInput) {
result.ambiguity = errors[0] / errors[1];
result.bestReprojErr = errors[0];
result.altReprojErr = errors[1];
result.isPresent = true;
return result;
} else {
photon::PnpResult result;
result.best = best;
result.bestReprojErr = errors[0];
result.isPresent = true;
return result;
}
}

[[maybe_unused]] static photon::PnpResult SolvePNP_SQPNP(
[[maybe_unused]] static std::optional<photon::PnpResult> SolvePNP_SQPNP(
const Eigen::Matrix<double, 3, 3>& cameraMatrix,
const Eigen::Matrix<double, 8, 1>& distCoeffs,
std::vector<frc::Translation3d> modelTrls,
@@ -286,7 +285,6 @@ static frc::Rotation3d RVecToRotation(const cv::Mat& rvecInput) {
photon::PnpResult result;
result.best = best;
result.bestReprojErr = error;
result.isPresent = true;
return result;
}
} // namespace OpenCVHelp
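SolvePNP_Square and SolvePNP_SQPNP now report failure by returning std::nullopt instead of leaving an isPresent flag unset, so callers test the optional directly. A call-site sketch follows; the ExampleSolve wrapper, header path, and namespace qualification are assumptions, and no real calibration data is shown.

// Sketch of the new failure handling for the PnP helpers.
#include <optional>
#include <vector>

#include <Eigen/Core>
#include <frc/geometry/Transform3d.h>
#include <frc/geometry/Translation3d.h>
#include <opencv2/core.hpp>
#include <photon/estimation/OpenCVHelp.h>  // assumed header path

std::optional<frc::Transform3d> ExampleSolve(
    const Eigen::Matrix<double, 3, 3>& cameraMatrix,
    const Eigen::Matrix<double, 8, 1>& distCoeffs,
    const std::vector<frc::Translation3d>& modelTrls,
    const std::vector<cv::Point2f>& imagePoints) {
  auto solved = photon::OpenCVHelp::SolvePNP_Square(cameraMatrix, distCoeffs,
                                                    modelTrls, imagePoints);
  if (!solved) {
    // Previously this case was signaled by isPresent == false on a
    // default-constructed PnpResult; now the optional is simply empty.
    return std::nullopt;
  }
  // solved->alt and solved->ambiguity remain available for disambiguation.
  return solved->best;
}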
@@ -47,7 +47,7 @@ static std::vector<frc::AprilTag> GetVisibleLayoutTags(
return retVal;
}

static PnpResult EstimateCamPosePNP(
static std::optional<PnpResult> EstimateCamPosePNP(
const Eigen::Matrix<double, 3, 3>& cameraMatrix,
const Eigen::Matrix<double, 8, 1>& distCoeffs,
const std::vector<PhotonTrackedTarget>& visTags,
@@ -76,44 +76,43 @@ static PnpResult EstimateCamPosePNP(
std::vector<cv::Point2f> points = OpenCVHelp::CornersToPoints(corners);

if (knownTags.size() == 1) {
PnpResult camToTag = OpenCVHelp::SolvePNP_Square(
cameraMatrix, distCoeffs, tagModel.GetVertices(), points);
if (!camToTag.isPresent) {
auto camToTag = OpenCVHelp::SolvePNP_Square(cameraMatrix, distCoeffs,
tagModel.GetVertices(), points);
if (!camToTag) {
return PnpResult{};
}
frc::Pose3d bestPose =
knownTags[0].pose.TransformBy(camToTag.best.Inverse());
knownTags[0].pose.TransformBy(camToTag->best.Inverse());
frc::Pose3d altPose{};
if (camToTag.ambiguity != 0) {
altPose = knownTags[0].pose.TransformBy(camToTag.alt.Inverse());
if (camToTag->ambiguity != 0) {
altPose = knownTags[0].pose.TransformBy(camToTag->alt.Inverse());
}
frc::Pose3d o{};
PnpResult result{};
result.best = frc::Transform3d{o, bestPose};
result.alt = frc::Transform3d{o, altPose};
result.ambiguity = camToTag.ambiguity;
result.bestReprojErr = camToTag.bestReprojErr;
result.altReprojErr = camToTag.altReprojErr;
result.ambiguity = camToTag->ambiguity;
result.bestReprojErr = camToTag->bestReprojErr;
result.altReprojErr = camToTag->altReprojErr;
return result;
} else {
std::vector<frc::Translation3d> objectTrls{};
for (const auto& tag : knownTags) {
auto verts = tagModel.GetFieldVertices(tag.pose);
objectTrls.insert(objectTrls.end(), verts.begin(), verts.end());
}
PnpResult camToOrigin = OpenCVHelp::SolvePNP_SQPNP(cameraMatrix, distCoeffs,
objectTrls, points);
if (!camToOrigin.isPresent) {
return PnpResult{};
auto camToOrigin = OpenCVHelp::SolvePNP_SQPNP(cameraMatrix, distCoeffs,
objectTrls, points);
if (!camToOrigin) {
return std::nullopt;
} else {
PnpResult result{};
result.best = camToOrigin.best.Inverse(),
result.alt = camToOrigin.alt.Inverse(),
result.ambiguity = camToOrigin.ambiguity;
result.bestReprojErr = camToOrigin.bestReprojErr;
result.altReprojErr = camToOrigin.altReprojErr;
result.isPresent = true;
return result;
PnpResult ret{};
ret.best = camToOrigin->best.Inverse(),
ret.alt = camToOrigin->alt.Inverse(),
ret.ambiguity = camToOrigin->ambiguity;
ret.bestReprojErr = camToOrigin->bestReprojErr;
ret.altReprojErr = camToOrigin->altReprojErr;
return ret;
}
}
}
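EstimateCamPosePNP likewise returns std::optional<PnpResult>, propagating std::nullopt from the SQPNP branch instead of an empty PnpResult. The sketch below mirrors how PhotonCameraSim::Process consumes it; the FieldToCamera wrapper, header paths, and the namespace qualification of kAprilTag36h11 are assumptions.

// Sketch of consuming the now-optional multi-tag estimate.
#include <optional>
#include <vector>

#include <Eigen/Core>
#include <frc/apriltag/AprilTagFieldLayout.h>
#include <frc/geometry/Transform3d.h>
#include <photon/estimation/VisionEstimation.h>  // assumed header path

std::optional<frc::Transform3d> FieldToCamera(
    const Eigen::Matrix<double, 3, 3>& cameraMatrix,
    const Eigen::Matrix<double, 8, 1>& distCoeffs,
    const std::vector<photon::PhotonTrackedTarget>& visibleTags,
    const frc::AprilTagFieldLayout& tagLayout) {
  auto pnpResult = photon::VisionEstimation::EstimateCamPosePNP(
      cameraMatrix, distCoeffs, visibleTags, tagLayout, photon::kAprilTag36h11);
  if (!pnpResult) {
    // Not enough usable tags, or the underlying PnP solve failed.
    return std::nullopt;
  }
  // best is the field-origin-to-camera transform produced by the solve.
  return pnpResult->best;
}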
