diff --git a/gtsam/discrete/AlgebraicDecisionTree.h b/gtsam/discrete/AlgebraicDecisionTree.h index 9f55f3b634..17385a975d 100644 --- a/gtsam/discrete/AlgebraicDecisionTree.h +++ b/gtsam/discrete/AlgebraicDecisionTree.h @@ -196,6 +196,42 @@ namespace gtsam { return this->apply(g, &Ring::div); } + /// Compute sum of all values + double sum() const { + double sum = 0; + auto visitor = [&](double y) { sum += y; }; + this->visit(visitor); + return sum; + } + + /** + * @brief Helper method to perform normalization such that all leaves in the + * tree sum to 1 + * + * @param sum + * @return AlgebraicDecisionTree + */ + AlgebraicDecisionTree normalize(double sum) const { + return this->apply([&sum](const double& x) { return x / sum; }); + } + + /// Find the minimum values amongst all leaves + double min() const { + double min = std::numeric_limits::max(); + auto visitor = [&](double x) { min = x < min ? x : min; }; + this->visit(visitor); + return min; + } + + /// Find the maximum values amongst all leaves + double max() const { + // Get the most negative value + double max = -std::numeric_limits::max(); + auto visitor = [&](double x) { max = x > max ? x : max; }; + this->visit(visitor); + return max; + } + /** sum out variable */ AlgebraicDecisionTree sum(const L& label, size_t cardinality) const { return this->combine(label, cardinality, &Ring::add); diff --git a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp index 19f4686c27..d65a9c82b7 100644 --- a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp +++ b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp @@ -593,6 +593,55 @@ TEST(ADT, zero) { EXPECT_DOUBLES_EQUAL(0, anotb(x11), 1e-9); } +/// Example ADT from 0 to 11. +ADT exampleADT() { + DiscreteKey A(0, 2), B(1, 3), C(2, 2); + ADT f(A & B & C, "0 6 2 8 4 10 1 7 3 9 5 11"); + return f; +} +/* ************************************************************************** */ +// Test sum +TEST(ADT, Sum) { + ADT a = exampleADT(); + double expected_sum = 0; + for (double i = 0; i < 12; i++) { + expected_sum += i; + } + EXPECT_DOUBLES_EQUAL(expected_sum, a.sum(), 1e-9); +} + +/* ************************************************************************** */ +// Test normalize +TEST(ADT, Normalize) { + ADT a = exampleADT(); + double sum = a.sum(); + auto actual = a.normalize(sum); + + DiscreteKey A(0, 2), B(1, 3), C(2, 2); + DiscreteKeys keys = DiscreteKeys{A, B, C}; + std::vector cpt{0 / sum, 6 / sum, 2 / sum, 8 / sum, + 4 / sum, 10 / sum, 1 / sum, 7 / sum, + 3 / sum, 9 / sum, 5 / sum, 11 / sum}; + ADT expected(keys, cpt); + EXPECT(assert_equal(expected, actual)); +} + +/* ************************************************************************** */ +// Test min +TEST(ADT, Min) { + ADT a = exampleADT(); + double min = a.min(); + EXPECT_DOUBLES_EQUAL(0.0, min, 1e-9); +} + +/* ************************************************************************** */ +// Test max +TEST(ADT, Max) { + ADT a = exampleADT(); + double max = a.max(); + EXPECT_DOUBLES_EQUAL(11.0, max, 1e-9); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/geometry/Rot2.h b/gtsam/geometry/Rot2.h index 63b9144345..6bb97ff6cf 100644 --- a/gtsam/geometry/Rot2.h +++ b/gtsam/geometry/Rot2.h @@ -68,7 +68,7 @@ namespace gtsam { return fromAngle(theta * degree); } - /// Named constructor from cos(theta),sin(theta) pair, will *not* normalize! 
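Note (not part of the patch): the new `sum()`, `normalize()`, `min()` and `max()` reductions can be exercised as in the sketch below, which mirrors the `exampleADT()` fixture from the unit tests above and assumes the test file's `ADT = AlgebraicDecisionTree<Key>` typedef.

```cpp
// Illustrative only: exercises the reductions added in this patch.
#include <gtsam/discrete/AlgebraicDecisionTree.h>
#include <gtsam/discrete/DiscreteKey.h>
#include <iostream>

using namespace gtsam;

int main() {
  DiscreteKey A(0, 2), B(1, 3), C(2, 2);
  // Same 12-leaf example tree as exampleADT() in the test above.
  AlgebraicDecisionTree<Key> f(A & B & C, "0 6 2 8 4 10 1 7 3 9 5 11");

  const double total = f.sum();       // 0 + 1 + ... + 11 = 66
  const auto p = f.normalize(total);  // leaves of p now sum to 1
  std::cout << "sum = " << total      // 66
            << ", min = " << f.min()  // 0
            << ", max = " << f.max()  // 11
            << ", normalized sum = " << p.sum() << std::endl;  // ~1
  return 0;
}
```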
+ /// Named constructor from cos(theta),sin(theta) pair static Rot2 fromCosSin(double c, double s); /** diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index 3d816fc25a..095a350dd9 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -856,7 +856,7 @@ class Cal3_S2Stereo { gtsam::Matrix K() const; gtsam::Point2 principalPoint() const; double baseline() const; - Vector6 vector() const; + gtsam::Vector6 vector() const; gtsam::Matrix inverse() const; }; diff --git a/gtsam/hybrid/GaussianMixture.h b/gtsam/hybrid/GaussianMixture.h index 521a4ca7a3..c1ef504f89 100644 --- a/gtsam/hybrid/GaussianMixture.h +++ b/gtsam/hybrid/GaussianMixture.h @@ -67,7 +67,8 @@ class GTSAM_EXPORT GaussianMixture double logConstant_; ///< log of the normalization constant. /** - * @brief Convert a DecisionTree of factors into a DT of Gaussian FGs. + * @brief Convert a DecisionTree of factors into + * a DecisionTree of Gaussian factor graphs. */ GaussianFactorGraphTree asGaussianFactorGraphTree() const; @@ -214,7 +215,8 @@ class GTSAM_EXPORT GaussianMixture * @return AlgebraicDecisionTree A decision tree on the discrete keys * only, with the leaf values as the error for each assignment. */ - AlgebraicDecisionTree errorTree(const VectorValues &continuousValues) const; + AlgebraicDecisionTree errorTree( + const VectorValues &continuousValues) const; /** * @brief Compute the logProbability of this Gaussian Mixture. diff --git a/gtsam/hybrid/GaussianMixtureFactor.h b/gtsam/hybrid/GaussianMixtureFactor.h index 63ca9e9239..67d12ddb09 100644 --- a/gtsam/hybrid/GaussianMixtureFactor.h +++ b/gtsam/hybrid/GaussianMixtureFactor.h @@ -135,7 +135,8 @@ class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor { * @return AlgebraicDecisionTree A decision tree with the same keys * as the factors involved, and leaf values as the error. */ - AlgebraicDecisionTree errorTree(const VectorValues &continuousValues) const; + AlgebraicDecisionTree errorTree( + const VectorValues &continuousValues) const; /** * @brief Compute the log-likelihood, including the log-normalizing constant. diff --git a/gtsam/hybrid/HybridValues.h b/gtsam/hybrid/HybridValues.h index 40d4b5e92a..09e7d773ca 100644 --- a/gtsam/hybrid/HybridValues.h +++ b/gtsam/hybrid/HybridValues.h @@ -12,6 +12,7 @@ /** * @file HybridValues.h * @date Jul 28, 2022 + * @author Varun Agrawal * @author Shangjie Xue */ @@ -54,13 +55,13 @@ class GTSAM_EXPORT HybridValues { HybridValues() = default; /// Construct from DiscreteValues and VectorValues. - HybridValues(const VectorValues &cv, const DiscreteValues &dv) - : continuous_(cv), discrete_(dv){} + HybridValues(const VectorValues& cv, const DiscreteValues& dv) + : continuous_(cv), discrete_(dv) {} /// Construct from all values types. HybridValues(const VectorValues& cv, const DiscreteValues& dv, const Values& v) - : continuous_(cv), discrete_(dv), nonlinear_(v){} + : continuous_(cv), discrete_(dv), nonlinear_(v) {} /// @} /// @name Testable @@ -101,9 +102,7 @@ class GTSAM_EXPORT HybridValues { bool existsDiscrete(Key j) { return (discrete_.find(j) != discrete_.end()); } /// Check whether a variable with key \c j exists in values. - bool existsNonlinear(Key j) { - return nonlinear_.exists(j); - } + bool existsNonlinear(Key j) { return nonlinear_.exists(j); } /// Check whether a variable with key \c j exists. 
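For context on the `HybridValues` API being reformatted here, a minimal usage sketch (illustrative, not part of the patch; key indices and values are made up):

```cpp
// Illustrative HybridValues usage: the two-argument constructor and the
// existsDiscrete() accessor touched in this hunk.
#include <gtsam/hybrid/HybridValues.h>
#include <gtsam/inference/Symbol.h>

using namespace gtsam;

int main() {
  VectorValues continuous;
  continuous.insert(Symbol('x', 0), Vector2(1.0, 2.0));

  DiscreteValues discrete;
  discrete[Symbol('m', 0)] = 1;  // discrete mode m0 takes value 1

  HybridValues values(continuous, discrete);
  const bool hasMode = values.existsDiscrete(Symbol('m', 0));  // true
  return hasMode ? 0 : 1;
}
```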
bool exists(Key j) { @@ -128,9 +127,7 @@ class GTSAM_EXPORT HybridValues { } /// insert_or_assign() , similar to Values.h - void insert_or_assign(Key j, size_t value) { - discrete_[j] = value; - } + void insert_or_assign(Key j, size_t value) { discrete_[j] = value; } /** Insert all continuous values from \c values. Throws an invalid_argument * exception if any keys to be inserted are already used. */ diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index b8edc39d88..1cc28b386d 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -333,7 +333,6 @@ TEST(HybridEstimation, Probability) { for (auto discrete_conditional : *discreteBayesNet) { bayesNet->add(discrete_conditional); } - auto discreteConditional = discreteBayesNet->at(0)->asDiscrete(); HybridValues hybrid_values = bayesNet->optimize(); diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index 93081d3090..751e84d915 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -680,12 +680,14 @@ conditional 0: Hybrid P( x0 | x1 m0) R = [ 10.0499 ] S[x1] = [ -0.0995037 ] d = [ -9.85087 ] + logNormalizationConstant: 1.38862 No noise model 1 Leaf p(x0 | x1) R = [ 10.0499 ] S[x1] = [ -0.0995037 ] d = [ -9.95037 ] + logNormalizationConstant: 1.38862 No noise model conditional 1: Hybrid P( x1 | x2 m0 m1) @@ -696,12 +698,14 @@ conditional 1: Hybrid P( x1 | x2 m0 m1) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -9.99901 ] + logNormalizationConstant: 1.3935 No noise model 0 1 Leaf p(x1 | x2) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -9.90098 ] + logNormalizationConstant: 1.3935 No noise model 1 Choice(m0) @@ -709,12 +713,14 @@ conditional 1: Hybrid P( x1 | x2 m0 m1) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -10.098 ] + logNormalizationConstant: 1.3935 No noise model 1 1 Leaf p(x1 | x2) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -10 ] + logNormalizationConstant: 1.3935 No noise model conditional 2: Hybrid P( x2 | m0 m1) @@ -726,6 +732,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.1489 ] mean: 1 elements x2: -1.0099 + logNormalizationConstant: 1.38857 No noise model 0 1 Leaf p(x2) @@ -733,6 +740,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.1479 ] mean: 1 elements x2: -1.0098 + logNormalizationConstant: 1.38857 No noise model 1 Choice(m0) @@ -741,6 +749,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.0504 ] mean: 1 elements x2: -1.0001 + logNormalizationConstant: 1.38857 No noise model 1 1 Leaf p(x2) @@ -748,6 +757,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.0494 ] mean: 1 elements x2: -1 + logNormalizationConstant: 1.38857 No noise model )"; diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp index b04878ac5d..861e19cc9e 100644 --- a/gtsam/linear/GaussianBayesNet.cpp +++ b/gtsam/linear/GaussianBayesNet.cpp @@ -243,5 +243,25 @@ namespace gtsam { } /* ************************************************************************* */ + double GaussianBayesNet::logNormalizationConstant() const { + /* + normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) + logConstant = -0.5 * n*log(2*pi) - 0.5 * log det(Sigma) + + log det(Sigma)) = -2.0 * logDeterminant() + thus, logConstant = -0.5*n*log(2*pi) + logDeterminant() + + BayesNet logConstant = sum(-0.5*n_i*log(2*pi) + logDeterminant_i()) + = sum(-0.5*n_i*log(2*pi)) + sum(logDeterminant_i()) + = 
sum(-0.5*n_i*log(2*pi)) + bn->logDeterminant() + */ + double logNormConst = 0.0; + for (const sharedConditional& cg : *this) { + logNormConst += cg->logNormalizationConstant(); + } + return logNormConst; + } + + /* ************************************************************************* */ } // namespace gtsam diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h index 19781d1e6d..ea1cb8603c 100644 --- a/gtsam/linear/GaussianBayesNet.h +++ b/gtsam/linear/GaussianBayesNet.h @@ -82,6 +82,12 @@ namespace gtsam { /** Check equality */ bool equals(const This& bn, double tol = 1e-9) const; + /// Check exact equality. + friend bool operator==(const GaussianBayesNet& lhs, + const GaussianBayesNet& rhs) { + return lhs.isEqual(rhs); + } + /// print graph void print( const std::string& s = "", @@ -228,6 +234,14 @@ namespace gtsam { * @return The determinant */ double logDeterminant() const; + /** + * @brief Get the log of the normalization constant corresponding to the + * joint Gaussian density represented by this Bayes net. + * + * @return double + */ + double logNormalizationConstant() const; + /** * Backsubstitute with a different RHS vector than the one stored in this BayesNet. * gy=inv(R*inv(Sigma))*gx diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index 0112835aa4..f986eed02f 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -121,6 +121,7 @@ namespace gtsam { const auto mean = solve({}); // solve for mean. mean.print(" mean", formatter); } + cout << " logNormalizationConstant: " << logNormalizationConstant() << std::endl; if (model_) model_->print(" Noise model: "); else @@ -184,8 +185,13 @@ namespace gtsam { double GaussianConditional::logNormalizationConstant() const { constexpr double log2pi = 1.8378770664093454835606594728112; size_t n = d().size(); - // log det(Sigma)) = - 2.0 * logDeterminant() - return - 0.5 * n * log2pi + logDeterminant(); + // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1} + // log det(Sigma) = -log(det(R'R)) = -2*log(det(R)) + // Hence, log det(Sigma)) = -2.0 * logDeterminant() + // which gives log = -0.5*n*log(2*pi) - 0.5*(-2.0 * logDeterminant()) + // = -0.5*n*log(2*pi) + (0.5*2.0 * logDeterminant()) + // = -0.5*n*log(2*pi) + logDeterminant() + return -0.5 * n * log2pi + logDeterminant(); } /* ************************************************************************* */ diff --git a/gtsam/linear/VectorValues.h b/gtsam/linear/VectorValues.h index 7fbd43ffc6..2fa50b7f6f 100644 --- a/gtsam/linear/VectorValues.h +++ b/gtsam/linear/VectorValues.h @@ -263,6 +263,11 @@ namespace gtsam { /** equals required by Testable for unit testing */ bool equals(const VectorValues& x, double tol = 1e-9) const; + /// Check equality. 
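As the comments above spell out, each conditional contributes log k = -0.5*n*log(2*pi) - 0.5*log det(Sigma), and with Sigma = (R'R)^{-1} this is -0.5*n*log(2*pi) + log det(R); the Bayes-net constant is simply the sum over conditionals. A small verification sketch (illustrative, not part of the patch; the tiny two-conditional net and its numbers are made up):

```cpp
// Illustrative check: the Bayes-net constant equals the sum of the
// per-conditional constants, each -0.5*n*log(2*pi) + logDeterminant().
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/NoiseModel.h>
#include <cassert>
#include <cmath>
#include <memory>

using namespace gtsam;

int main() {
  // Tiny net p(x0 | x1) p(x1), scalar variables, sigma = 2.
  auto model = noiseModel::Isotropic::Sigma(1, 2.0);
  GaussianBayesNet bn;
  bn.push_back(std::make_shared<GaussianConditional>(
      0, Vector1(9.0), I_1x1, 1, I_1x1, model));  // p(x0 | x1)
  bn.push_back(std::make_shared<GaussianConditional>(
      1, Vector1(5.0), I_1x1, model));            // p(x1)

  const double expected = bn.at(0)->logNormalizationConstant() +
                          bn.at(1)->logNormalizationConstant();
  assert(std::fabs(bn.logNormalizationConstant() - expected) < 1e-9);
  return 0;
}
```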
+ friend bool operator==(const VectorValues& lhs, const VectorValues& rhs) { + return lhs.equals(rhs); + } + /// @{ /// @name Advanced Interface /// @{ diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index 4b0963b8db..ed19eaa3b5 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -375,7 +375,8 @@ virtual class JacobianFactor : gtsam::GaussianFactor { void serialize() const; }; -pair EliminateQR(const gtsam::GaussianFactorGraph& factors, const gtsam::Ordering& keys); +pair EliminateQR( + const gtsam::GaussianFactorGraph& factors, const gtsam::Ordering& keys); #include virtual class HessianFactor : gtsam::GaussianFactor { @@ -510,12 +511,17 @@ virtual class GaussianConditional : gtsam::JacobianFactor { GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R, size_t name1, gtsam::Matrix S, size_t name2, gtsam::Matrix T, const gtsam::noiseModel::Diagonal* sigmas); + GaussianConditional(const vector> terms, + size_t nrFrontals, gtsam::Vector d, + const gtsam::noiseModel::Diagonal* sigmas); // Constructors with no noise model GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R); GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R, size_t name1, gtsam::Matrix S); GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R, size_t name1, gtsam::Matrix S, size_t name2, gtsam::Matrix T); + GaussianConditional(const gtsam::KeyVector& keys, size_t nrFrontals, + const gtsam::VerticalBlockMatrix& augmentedMatrix); // Named constructors static gtsam::GaussianConditional FromMeanAndStddev(gtsam::Key key, diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp index 966b70915c..99453ee4ef 100644 --- a/gtsam/linear/tests/testGaussianBayesNet.cpp +++ b/gtsam/linear/tests/testGaussianBayesNet.cpp @@ -80,6 +80,8 @@ TEST(GaussianBayesNet, Evaluate1) { smallBayesNet.at(0)->logNormalizationConstant() + smallBayesNet.at(1)->logNormalizationConstant(), 1e-9); + EXPECT_DOUBLES_EQUAL(log(constant), smallBayesNet.logNormalizationConstant(), + 1e-9); const double actual = smallBayesNet.evaluate(mean); EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9); } diff --git a/gtsam/linear/tests/testGaussianConditional.cpp b/gtsam/linear/tests/testGaussianConditional.cpp index a4a7220128..dcd8218894 100644 --- a/gtsam/linear/tests/testGaussianConditional.cpp +++ b/gtsam/linear/tests/testGaussianConditional.cpp @@ -516,6 +516,7 @@ TEST(GaussianConditional, Print) { " d = [ 20 40 ]\n" " mean: 1 elements\n" " x0: 20 40\n" + " logNormalizationConstant: -4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected, conditional, "GaussianConditional")); @@ -530,6 +531,7 @@ TEST(GaussianConditional, Print) { " S[x1] = [ -1 -2 ]\n" " [ -3 -4 ]\n" " d = [ 20 40 ]\n" + " logNormalizationConstant: -4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected1, conditional1, "GaussianConditional")); @@ -545,6 +547,7 @@ TEST(GaussianConditional, Print) { " S[y1] = [ -5 -6 ]\n" " [ -7 -8 ]\n" " d = [ 20 40 ]\n" + " logNormalizationConstant: -4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected2, conditional2, "GaussianConditional")); } diff --git a/gtsam/nonlinear/ISAM2.cpp b/gtsam/nonlinear/ISAM2.cpp index 0b5449b586..47857a2a2e 100644 --- a/gtsam/nonlinear/ISAM2.cpp +++ b/gtsam/nonlinear/ISAM2.cpp @@ -424,6 +424,11 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors, ISAM2Result result(params_.enableDetailedResults); UpdateImpl update(params_, updateParams); + // Initialize 
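The `operator==` overloads added to `VectorValues` (and `GaussianBayesNet` above) allow direct comparison instead of an explicit `equals()` call; a trivial sketch, not part of the patch:

```cpp
// Illustrative: operator== forwards to equals() with the default tolerance.
#include <gtsam/linear/VectorValues.h>

using namespace gtsam;

int main() {
  VectorValues a, b;
  a.insert(0, Vector2(1.0, 2.0));
  b.insert(0, Vector2(1.0, 2.0));

  const bool same = (a == b);  // same result as a.equals(b)
  return same ? 0 : 1;
}
```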
any new variables \Theta_{new} and add + // \Theta:=\Theta\cup\Theta_{new}. + // Needed before delta update if using Dogleg optimizer. + addVariables(newTheta, result.details()); + // Update delta if we need it to check relinearization later if (update.relinarizationNeeded(update_count_)) updateDelta(updateParams.forceFullSolve); @@ -435,9 +440,7 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors, update.computeUnusedKeys(newFactors, variableIndex_, result.keysWithRemovedFactors, &result.unusedKeys); - // 2. Initialize any new variables \Theta_{new} and add - // \Theta:=\Theta\cup\Theta_{new}. - addVariables(newTheta, result.details()); + // 2. Compute new error to check for relinearization if (params_.evaluateNonlinearError) update.error(nonlinearFactors_, calculateEstimate(), &result.errorBefore); @@ -731,6 +734,7 @@ void ISAM2::updateDelta(bool forceFullSolve) const { effectiveWildfireThreshold, &delta_); deltaReplacedMask_.clear(); gttoc(Wildfire_update); + } else if (std::holds_alternative(params_.optimizationParams)) { // If using Dogleg, do a Dogleg step const ISAM2DoglegParams& doglegParams = @@ -769,9 +773,8 @@ void ISAM2::updateDelta(bool forceFullSolve) const { gttic(Copy_dx_d); // Update Delta and linear step doglegDelta_ = doglegResult.delta; - delta_ = - doglegResult - .dx_d; // Copy the VectorValues containing with the linear solution + // Copy the VectorValues containing with the linear solution + delta_ = doglegResult.dx_d; gttoc(Copy_dx_d); } else { throw std::runtime_error("iSAM2: unknown ISAM2Params type"); diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i index 9084a50fa7..f40deab5aa 100644 --- a/gtsam/nonlinear/nonlinear.i +++ b/gtsam/nonlinear/nonlinear.i @@ -381,17 +381,22 @@ typedef gtsam::GncOptimizer> G #include virtual class LevenbergMarquardtOptimizer : gtsam::NonlinearOptimizer { LevenbergMarquardtOptimizer(const gtsam::NonlinearFactorGraph& graph, - const gtsam::Values& initialValues); + const gtsam::Values& initialValues, + const gtsam::LevenbergMarquardtParams& params = + gtsam::LevenbergMarquardtParams()); LevenbergMarquardtOptimizer(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& initialValues, - const gtsam::LevenbergMarquardtParams& params); + const gtsam::Ordering& ordering, + const gtsam::LevenbergMarquardtParams& params = + gtsam::LevenbergMarquardtParams()); + double lambda() const; void print(string s = "") const; }; #include class ISAM2GaussNewtonParams { - ISAM2GaussNewtonParams(); + ISAM2GaussNewtonParams(double _wildfireThreshold = 0.001); void print(string s = "") const; diff --git a/gtsam/symbolic/SymbolicFactor-inst.h b/gtsam/symbolic/SymbolicFactor-inst.h index db0eb05caf..a5410aad51 100644 --- a/gtsam/symbolic/SymbolicFactor-inst.h +++ b/gtsam/symbolic/SymbolicFactor-inst.h @@ -42,7 +42,9 @@ namespace gtsam // Gather all keys KeySet allKeys; for(const std::shared_ptr& factor: factors) { - allKeys.insert(factor->begin(), factor->end()); + // Non-active factors are nullptr + if (factor) + allKeys.insert(factor->begin(), factor->end()); } // Check keys diff --git a/gtsam_unstable/gtsam_unstable.i b/gtsam_unstable/gtsam_unstable.i index 8566934dd8..3beb845fbb 100644 --- a/gtsam_unstable/gtsam_unstable.i +++ b/gtsam_unstable/gtsam_unstable.i @@ -671,6 +671,21 @@ class AHRS { //void print(string s) const; }; +#include +template +virtual class PartialPriorFactor : gtsam::NoiseModelFactor { + PartialPriorFactor(gtsam::Key key, size_t idx, double prior, + const gtsam::noiseModel::Base* model); + 
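For reference, the code path affected by moving `addVariables` ahead of the delta update is the Dogleg configuration of ISAM2; a minimal setup sketch (illustrative only, not part of the patch; keys, noise values, and the prior factor are made up):

```cpp
// Configuring ISAM2 with the Dogleg optimizer: with the reordering above,
// brand-new variables are added to Theta before the delta update runs.
#include <gtsam/geometry/Pose2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;
using symbol_shorthand::X;

int main() {
  ISAM2Params params;
  params.optimizationParams = ISAM2DoglegParams();  // instead of Gauss-Newton
  params.relinearizeSkip = 1;
  ISAM2 isam(params);

  NonlinearFactorGraph graph;
  Values values;
  auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
  graph.emplace_shared<PriorFactor<Pose2>>(X(0), Pose2(0, 0, 0), priorNoise);
  values.insert(X(0), Pose2(0.1, -0.1, 0.05));

  // First update introduces X(0); the delta update now sees it.
  isam.update(graph, values);
  return 0;
}
```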
PartialPriorFactor(gtsam::Key key, const std::vector& indices, + const gtsam::Vector& prior, + const gtsam::noiseModel::Base* model); + + // enabling serialization functionality + void serialize() const; + + const gtsam::Vector& prior(); +}; + // Tectonic SAM Factors #include diff --git a/gtsam_unstable/slam/PartialPriorFactor.h b/gtsam_unstable/slam/PartialPriorFactor.h index b70fe8b197..7722e5d82b 100644 --- a/gtsam_unstable/slam/PartialPriorFactor.h +++ b/gtsam_unstable/slam/PartialPriorFactor.h @@ -50,9 +50,6 @@ namespace gtsam { Vector prior_; ///< Measurement on tangent space parameters, in compressed form. std::vector indices_; ///< Indices of the measured tangent space parameters. - /** default constructor - only use for serialization */ - PartialPriorFactor() {} - /** * constructor with just minimum requirements for a factor - allows more * computation in the constructor. This should only be used by subclasses @@ -65,7 +62,8 @@ namespace gtsam { // Provide access to the Matrix& version of evaluateError: using Base::evaluateError; - ~PartialPriorFactor() override {} + /** default constructor - only use for serialization */ + PartialPriorFactor() {} /** Single Element Constructor: Prior on a single parameter at index 'idx' in the tangent vector.*/ PartialPriorFactor(Key key, size_t idx, double prior, const SharedNoiseModel& model) : @@ -85,6 +83,8 @@ namespace gtsam { assert(model->dim() == (size_t)prior.size()); } + ~PartialPriorFactor() override {} + /// @return a deep copy of this factor gtsam::NonlinearFactor::shared_ptr clone() const override { return std::static_pointer_cast( diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 7e270e4e86..9bbde7023a 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -29,11 +29,8 @@ if(POLICY CMP0057) cmake_policy(SET CMP0057 NEW) endif() -# Prefer system pybind11 first, if not found, rely on bundled version: -find_package(pybind11 CONFIG QUIET) -if (NOT pybind11_FOUND) - add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11) -endif() +# Use bundled pybind11 version +add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11) # Set the wrapping script variable set(PYBIND_WRAP_SCRIPT "${PROJECT_SOURCE_DIR}/wrap/scripts/pybind_wrap.py") @@ -189,6 +186,8 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON) gtsam::BinaryMeasurementsPoint3 gtsam::BinaryMeasurementsUnit3 gtsam::BinaryMeasurementsRot3 + gtsam::SimWall2DVector + gtsam::SimPolygon2DVector gtsam::CameraSetCal3_S2 gtsam::CameraSetCal3Bundler gtsam::CameraSetCal3Unified @@ -272,6 +271,20 @@ add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY} VERBATIM) +# if (NOT WIN32) # WIN32=1 is target platform is Windows +# add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} +# COMMAND stubgen -q -p gtsam -o ./stubs && cp -a stubs/gtsam/ gtsam && ${PYTHON_EXECUTABLE} -m pip install --user . +# DEPENDS ${GTSAM_PYTHON_DEPENDENCIES} +# WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}) +# else() +# #TODO(Varun) Find equivalent cp on Windows +# add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} +# COMMAND ${PYTHON_EXECUTABLE} -m pip install --user . 
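A C++ counterpart of the newly wrapped `PartialPriorFactor`, mirroring the "GPS-like" usage in the Python `LocalizationExample` added below (illustrative, not part of the patch; the key and prior values are made up):

```cpp
// Constrain only the (x, y) entries of a Pose2's tangent vector,
// leaving theta unconstrained.
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam_unstable/slam/PartialPriorFactor.h>
#include <vector>

using namespace gtsam;

int main() {
  NonlinearFactorGraph graph;
  auto unaryNoise = noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1));  // 10cm on x,y

  // Indices 0 and 1 pick x and y out of the 3-dof Pose2 tangent space.
  std::vector<size_t> indices{0, 1};
  graph.emplace_shared<PartialPriorFactor<Pose2>>(1, indices,
                                                  Vector2(0.0, 0.0), unaryNoise);
  return 0;
}
```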
+# DEPENDS ${GTSAM_PYTHON_DEPENDENCIES} +# WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}) +# endif() + + # Custom make command to run all GTSAM Python tests add_custom_target( python-test diff --git a/python/dev_requirements.txt b/python/dev_requirements.txt index 6970ee6139..6a5e7f9241 100644 --- a/python/dev_requirements.txt +++ b/python/dev_requirements.txt @@ -1,2 +1,3 @@ -r requirements.txt -pyparsing>=2.4.2 \ No newline at end of file +pyparsing>=2.4.2 +mypy==1.4.1 #TODO(Varun) A bug in mypy>=1.5.0 causes an unresolved placeholder error when importing numpy>=2.0.0 (https://github.com/python/mypy/issues/17396) \ No newline at end of file diff --git a/python/gtsam/examples/README.md b/python/gtsam/examples/README.md index 2a2c9f2ef2..70f6cfcb09 100644 --- a/python/gtsam/examples/README.md +++ b/python/gtsam/examples/README.md @@ -17,7 +17,7 @@ | InverseKinematicsExampleExpressions.cpp | | | ISAM2Example_SmartFactor | | | ISAM2_SmartFactorStereo_IMU | | -| LocalizationExample | impossible? | +| LocalizationExample | :heavy_check_mark: | | METISOrderingExample | | | OdometryExample | :heavy_check_mark: | | PlanarSLAMExample | :heavy_check_mark: | diff --git a/python/gtsam_unstable/examples/LocalizationExample.py b/python/gtsam_unstable/examples/LocalizationExample.py new file mode 100644 index 0000000000..ad8266d6d6 --- /dev/null +++ b/python/gtsam_unstable/examples/LocalizationExample.py @@ -0,0 +1,68 @@ +""" +A simple 2D pose slam example with "GPS" measurements + - The robot moves forward 2 meter each iteration + - The robot initially faces along the X axis (horizontal, to the right in 2D) + - We have full odometry between pose + - We have "GPS-like" measurements implemented with a custom factor +""" +import numpy as np + +import gtsam +from gtsam import BetweenFactorPose2, Pose2, noiseModel +from gtsam_unstable import PartialPriorFactorPose2 + + +def main(): + # 1. Create a factor graph container and add factors to it. + graph = gtsam.NonlinearFactorGraph() + + # 2a. Add odometry factors + # For simplicity, we will use the same noise model for each odometry factor + odometryNoise = noiseModel.Diagonal.Sigmas(np.asarray([0.2, 0.2, 0.1])) + + # Create odometry (Between) factors between consecutive poses + graph.push_back( + BetweenFactorPose2(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise)) + graph.push_back( + BetweenFactorPose2(2, 3, Pose2(2.0, 0.0, 0.0), odometryNoise)) + + # 2b. Add "GPS-like" measurements + # We will use PartialPrior factor for this. + unaryNoise = noiseModel.Diagonal.Sigmas(np.array([0.1, + 0.1])) # 10cm std on x,y + + graph.push_back( + PartialPriorFactorPose2(1, [0, 1], np.asarray([0.0, 0.0]), unaryNoise)) + graph.push_back( + PartialPriorFactorPose2(2, [0, 1], np.asarray([2.0, 0.0]), unaryNoise)) + graph.push_back( + PartialPriorFactorPose2(3, [0, 1], np.asarray([4.0, 0.0]), unaryNoise)) + graph.print("\nFactor Graph:\n") + + # 3. Create the data structure to hold the initialEstimate estimate to the solution + # For illustrative purposes, these have been deliberately set to incorrect values + initialEstimate = gtsam.Values() + initialEstimate.insert(1, Pose2(0.5, 0.0, 0.2)) + initialEstimate.insert(2, Pose2(2.3, 0.1, -0.2)) + initialEstimate.insert(3, Pose2(4.1, 0.1, 0.1)) + initialEstimate.print("\nInitial Estimate:\n") + + # 4. Optimize using Levenberg-Marquardt optimization. 
The optimizer + # accepts an optional set of configuration parameters, controlling + # things like convergence criteria, the type of linear system solver + # to use, and the amount of information displayed during optimization. + # Here we will use the default set of parameters. See the + # documentation for the full set of parameters. + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) + result = optimizer.optimize() + result.print("Final Result:\n") + + # 5. Calculate and print marginal covariances for all variables + marginals = gtsam.Marginals(graph, result) + print("x1 covariance:\n", marginals.marginalCovariance(1)) + print("x2 covariance:\n", marginals.marginalCovariance(2)) + print("x3 covariance:\n", marginals.marginalCovariance(3)) + + +if __name__ == "__main__": + main() diff --git a/python/gtsam_unstable/gtsam_unstable.tpl b/python/gtsam_unstable/gtsam_unstable.tpl index b98872c46c..006ca7fc8f 100644 --- a/python/gtsam_unstable/gtsam_unstable.tpl +++ b/python/gtsam_unstable/gtsam_unstable.tpl @@ -9,6 +9,7 @@ #include #include +#include #include #include #include diff --git a/python/setup.py.in b/python/setup.py.in index 824a6656ed..b9d7392c7e 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -13,6 +13,7 @@ package_data = { "./*.so", "./*.dll", "./*.pyd", + "*.pyi", "**/*.pyi", # Add the type hints ] } diff --git a/tests/testDoglegOptimizer.cpp b/tests/testDoglegOptimizer.cpp index e64e5ab7ad..6fbc522d36 100644 --- a/tests/testDoglegOptimizer.cpp +++ b/tests/testDoglegOptimizer.cpp @@ -24,10 +24,9 @@ #include #include #include -#include -#include -#include -#include +#include +#include +#include "examples/SFMdata.h" #include @@ -36,7 +35,6 @@ using namespace gtsam; // Convenience for named keys using symbol_shorthand::X; -using symbol_shorthand::L; /* ************************************************************************* */ TEST(DoglegOptimizer, ComputeBlend) { @@ -185,6 +183,128 @@ TEST(DoglegOptimizer, Constraint) { #endif } +/* ************************************************************************* */ +/** + * Test created to fix issue in ISAM2 when using the DogLegOptimizer. + * Originally reported by kvmanohar22 in issue #301 + * https://github.com/borglab/gtsam/issues/301 + * + * This test is based on a script provided by kvmanohar22 + * to help reproduce the issue. 
+ */ +TEST(DogLegOptimizer, VariableUpdate) { + // Make the typename short so it looks much cleaner + typedef SmartProjectionPoseFactor SmartFactor; + + // create a typedef to the camera type + typedef PinholePose Camera; + // Define the camera calibration parameters + Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)); + + // Define the camera observation noise model + noiseModel::Isotropic::shared_ptr measurementNoise = + noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v + + // Create the set of ground-truth landmarks and poses + vector points = createPoints(); + vector poses = createPoses(); + + // Create a factor graph + NonlinearFactorGraph graph; + + ISAM2DoglegParams doglegparams = ISAM2DoglegParams(); + doglegparams.verbose = false; + ISAM2Params isam2_params; + isam2_params.evaluateNonlinearError = true; + isam2_params.relinearizeThreshold = 0.0; + isam2_params.enableRelinearization = true; + isam2_params.optimizationParams = doglegparams; + isam2_params.relinearizeSkip = 1; + ISAM2 isam2(isam2_params); + + // Simulated measurements from each camera pose, adding them to the factor + // graph + unordered_map smart_factors; + for (size_t j = 0; j < points.size(); ++j) { + // every landmark represent a single landmark, we use shared pointer to init + // the factor, and then insert measurements. + SmartFactor::shared_ptr smartfactor(new SmartFactor(measurementNoise, K)); + + for (size_t i = 0; i < poses.size(); ++i) { + // generate the 2D measurement + Camera camera(poses[i], K); + Point2 measurement = camera.project(points[j]); + + // call add() function to add measurement into a single factor, here we + // need to add: + // 1. the 2D measurement + // 2. the corresponding camera's key + // 3. camera noise model + // 4. camera calibration + + // add only first 3 measurements and update the later measurements + // incrementally + if (i < 3) smartfactor->add(measurement, i); + } + + // insert the smart factor in the graph + smart_factors[j] = smartfactor; + graph.push_back(smartfactor); + } + + // Add a prior on pose x0. This indirectly specifies where the origin is. + // 30cm std on x,y,z 0.1 rad on roll,pitch,yaw + noiseModel::Diagonal::shared_ptr noise = noiseModel::Diagonal::Sigmas( + (Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1)).finished()); + graph.emplace_shared >(0, poses[0], noise); + + // Because the structure-from-motion problem has a scale ambiguity, the + // problem is still under-constrained. Here we add a prior on the second pose + // x1, so this will fix the scale by indicating the distance between x0 and + // x1. Because these two are fixed, the rest of the poses will be also be + // fixed. 
+ graph.emplace_shared >(1, poses[1], + noise); // add directly to graph + + // Create the initial estimate to the solution + // Intentionally initialize the variables off from the ground truth + Values initialEstimate; + Pose3 delta(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20)); + for (size_t i = 0; i < 3; ++i) + initialEstimate.insert(i, poses[i].compose(delta)); + // initialEstimate.print("Initial Estimates:\n"); + + // Optimize the graph and print results + isam2.update(graph, initialEstimate); + Values result = isam2.calculateEstimate(); + // result.print("Results:\n"); + + // we add new measurements from this pose + size_t pose_idx = 3; + + // Now update existing smart factors with new observations + for (size_t j = 0; j < points.size(); ++j) { + SmartFactor::shared_ptr smartfactor = smart_factors[j]; + + // add the 4th measurement + Camera camera(poses[pose_idx], K); + Point2 measurement = camera.project(points[j]); + smartfactor->add(measurement, pose_idx); + } + + graph.resize(0); + initialEstimate.clear(); + + // update initial estimate for the new pose + initialEstimate.insert(pose_idx, poses[pose_idx].compose(delta)); + + // this should break the system + isam2.update(graph, initialEstimate); + result = isam2.calculateEstimate(); + EXPECT(std::find(result.keys().begin(), result.keys().end(), pose_idx) != + result.keys().end()); +} + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ diff --git a/tests/testGaussianISAM2.cpp b/tests/testGaussianISAM2.cpp index 1ea60dac95..721ed51c02 100644 --- a/tests/testGaussianISAM2.cpp +++ b/tests/testGaussianISAM2.cpp @@ -994,6 +994,56 @@ TEST(ISAM2, calculate_nnz) EXPECT_LONGS_EQUAL(expected, actual); } +class FixActiveFactor : public NoiseModelFactorN { + using Base = NoiseModelFactorN; + bool is_active_; + +public: + FixActiveFactor(const gtsam::Key& key, const bool active) + : Base(nullptr, key), is_active_(active) {} + + virtual bool active(const gtsam::Values &values) const override { + return is_active_; + } + + virtual Vector + evaluateError(const Vector2& x, + Base::OptionalMatrixTypeT H = nullptr) const override { + if (H) { + *H = Vector2::Identity(); + } + return Vector2::Zero(); + } +}; + +TEST(ActiveFactorTesting, Issue1596) { + // Issue1596: When a derived Nonlinear Factor is not active, the linearization returns a nullptr (NonlinearFactor.cpp:156), which + // causes an error when `EliminateSymbolic` is called (SymbolicFactor-inst.h:45) due to not checking if the factor is nullptr. + const gtsam::Key key{Symbol('x', 0)}; + + ISAM2 isam; + Values values; + NonlinearFactorGraph graph; + + // Insert an active factor + values.insert(key, Vector2::Zero()); + graph.emplace_shared(key, true); + + // No problem here + isam.update(graph, values); + + graph = NonlinearFactorGraph(); + + // Inserting a factor that is never active + graph.emplace_shared(key, false); + + // This call throws the error if the pointer is not validated on (SymbolicFactor-inst.h:45) + isam.update(graph); + + // If the bug is fixed, this line is reached. + EXPECT(isam.getFactorsUnsafe().size() == 2); +} + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr);} /* ************************************************************************* */