diff --git a/gtsam/discrete/DiscreteConditional.cpp b/gtsam/discrete/DiscreteConditional.cpp
index 9cf19638c7..3f0c9f5118 100644
--- a/gtsam/discrete/DiscreteConditional.cpp
+++ b/gtsam/discrete/DiscreteConditional.cpp
@@ -465,6 +465,12 @@ string DiscreteConditional::html(const KeyFormatter& keyFormatter,
 double DiscreteConditional::evaluate(const HybridValues& x) const {
   return this->evaluate(x.discrete());
 }
+
+/* ************************************************************************* */
+double DiscreteConditional::negLogConstant() const {
+  return 0.0;
+}
+
 /* ************************************************************************* */
 }  // namespace gtsam
diff --git a/gtsam/discrete/DiscreteConditional.h b/gtsam/discrete/DiscreteConditional.h
index 4c7af14896..e16100d0aa 100644
--- a/gtsam/discrete/DiscreteConditional.h
+++ b/gtsam/discrete/DiscreteConditional.h
@@ -264,11 +264,12 @@ class GTSAM_EXPORT DiscreteConditional
   }

   /**
-   * logNormalizationConstant K is just zero, such that
-   * logProbability(x) = log(evaluate(x)) = - error(x)
-   * and hence error(x) = - log(evaluate(x)) > 0 for all x.
+   * negLogConstant is just zero, such that
+   * -logProbability(x) = -log(evaluate(x)) = error(x)
+   * and hence error(x) >= 0 for all x.
+   * Thus -log(k) = 0 for the normalization constant k = 1.
    */
-  double logNormalizationConstant() const override { return 0.0; }
+  double negLogConstant() const override;

   /// @}
diff --git a/gtsam/discrete/discrete.i b/gtsam/discrete/discrete.i
index 0bdebd0e17..55c8f9e227 100644
--- a/gtsam/discrete/discrete.i
+++ b/gtsam/discrete/discrete.i
@@ -112,7 +112,7 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor {
                       const std::vector<double>& table);

   // Standard interface
-  double logNormalizationConstant() const;
+  double negLogConstant() const;
   double logProbability(const gtsam::DiscreteValues& values) const;
   double evaluate(const gtsam::DiscreteValues& values) const;
   double error(const gtsam::DiscreteValues& values) const;
diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp
index 3cb3bba65d..cac2adcf87 100644
--- a/gtsam/hybrid/HybridConditional.cpp
+++ b/gtsam/hybrid/HybridConditional.cpp
@@ -161,18 +161,18 @@ double HybridConditional::logProbability(const HybridValues &values) const {
 }

 /* ************************************************************************ */
-double HybridConditional::logNormalizationConstant() const {
+double HybridConditional::negLogConstant() const {
   if (auto gc = asGaussian()) {
-    return gc->logNormalizationConstant();
+    return gc->negLogConstant();
   }
   if (auto gm = asHybrid()) {
-    return gm->logNormalizationConstant();  // 0.0!
+    return gm->negLogConstant();  // 0.0!
   }
   if (auto dc = asDiscrete()) {
-    return dc->logNormalizationConstant();  // 0.0!
+    return dc->negLogConstant();  // 0.0!
   }
   throw std::runtime_error(
-      "HybridConditional::logProbability: conditional type not handled");
+      "HybridConditional::negLogConstant: conditional type not handled");
 }

 /* ************************************************************************ */
diff --git a/gtsam/hybrid/HybridConditional.h b/gtsam/hybrid/HybridConditional.h
index 51eeeb5bb6..588c44e0bc 100644
--- a/gtsam/hybrid/HybridConditional.h
+++ b/gtsam/hybrid/HybridConditional.h
@@ -193,11 +193,12 @@ class GTSAM_EXPORT HybridConditional
   double logProbability(const HybridValues& values) const override;

   /**
-   * Return the log normalization constant.
+   * Return the negative log of the normalization constant.
+   * This shows up in logProbability as -(error(x) + negLogConstant).
    * Note this is 0.0 for discrete and hybrid conditionals, but depends
    * on the continuous parameters for Gaussian conditionals.
    */
-  double logNormalizationConstant() const override;
+  double negLogConstant() const override;

   /// Return the probability (or density) of the underlying conditional.
   double evaluate(const HybridValues& values) const override;
diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index fb943366cb..ef38237f2b 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -36,7 +36,7 @@ HybridGaussianFactor::FactorValuePairs GetFactorValuePairs(
     // Check if conditional is pruned
     if (conditional) {
       // Assign log(\sqrt(|2πΣ|)) = -log(1 / sqrt(|2πΣ|))
-      value = -conditional->logNormalizationConstant();
+      value = conditional->negLogConstant();
     }
     return {std::dynamic_pointer_cast<GaussianFactor>(conditional), value};
   };
@@ -51,14 +51,14 @@ HybridGaussianConditional::HybridGaussianConditional(
                  discreteParents, GetFactorValuePairs(conditionals)),
       BaseConditional(continuousFrontals.size()),
       conditionals_(conditionals) {
-  // Calculate logConstant_ as the minimum of the log normalizers of the
-  // conditionals, by visiting the decision tree:
-  logConstant_ = std::numeric_limits<double>::infinity();
+  // Calculate negLogConstant_ as the minimum of the negative-log normalizers of
+  // the conditionals, by visiting the decision tree:
+  negLogConstant_ = std::numeric_limits<double>::infinity();
   conditionals_.visit(
       [this](const GaussianConditional::shared_ptr &conditional) {
         if (conditional) {
-          this->logConstant_ = std::min(
-              this->logConstant_, -conditional->logNormalizationConstant());
+          this->negLogConstant_ =
+              std::min(this->negLogConstant_, conditional->negLogConstant());
         }
       });
 }
@@ -84,8 +84,7 @@ GaussianFactorGraphTree HybridGaussianConditional::asGaussianFactorGraphTree()
   auto wrap = [this](const GaussianConditional::shared_ptr &gc) {
     // First check if conditional has not been pruned
     if (gc) {
-      const double Cgm_Kgcm =
-          -this->logConstant_ - gc->logNormalizationConstant();
+      const double Cgm_Kgcm = gc->negLogConstant() - this->negLogConstant_;
       // If there is a difference in the covariances, we need to account for
       // that since the error is dependent on the mode.
      if (Cgm_Kgcm > 0.0) {
@@ -156,8 +155,7 @@ void HybridGaussianConditional::print(const std::string &s,
     std::cout << "(" << formatter(dk.first) << ", " << dk.second << "), ";
   }
   std::cout << std::endl
-            << " logNormalizationConstant: " << logNormalizationConstant()
-            << std::endl
+            << " logNormalizationConstant: " << -negLogConstant() << std::endl
            << std::endl;
   conditionals_.print(
       "", [&](Key k) { return formatter(k); },
@@ -215,8 +213,7 @@ std::shared_ptr<HybridGaussianFactor> HybridGaussianConditional::likelihood(
       [&](const GaussianConditional::shared_ptr &conditional)
           -> GaussianFactorValuePair {
         const auto likelihood_m = conditional->likelihood(given);
-        const double Cgm_Kgcm =
-            -logConstant_ - conditional->logNormalizationConstant();
+        const double Cgm_Kgcm = conditional->negLogConstant() - negLogConstant_;
         if (Cgm_Kgcm == 0.0) {
           return {likelihood_m, 0.0};
         } else {
diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h
index 4a5fdcc89e..9c70cf6cb9 100644
--- a/gtsam/hybrid/HybridGaussianConditional.h
+++ b/gtsam/hybrid/HybridGaussianConditional.h
@@ -66,7 +66,7 @@ class GTSAM_EXPORT HybridGaussianConditional
   Conditionals conditionals_;  ///< a decision tree of Gaussian conditionals.
   ///< Negative-log of the normalization constant (log(\sqrt(|2πΣ|))).
   ///< Take advantage of the neg-log space so everything is a minimization
-  double logConstant_;
+  double negLogConstant_;

   /**
    * @brief Convert a HybridGaussianConditional of conditionals into
@@ -150,9 +150,15 @@ class GTSAM_EXPORT HybridGaussianConditional
   /// Returns the continuous keys among the parents.
   KeyVector continuousParents() const;

-  /// The log normalization constant is max of the the individual
-  /// log-normalization constants.
-  double logNormalizationConstant() const override { return -logConstant_; }
+  /**
+   * @brief Return the normalization constant in negative log space.
+   *
+   * The negative log constant is the minimum of the individual
+   * conditionals' negative log-normalization constants.
+   *
+   * @return double
+   */
+  inline double negLogConstant() const override { return negLogConstant_; }

   /**
    * Create a likelihood factor for a hybrid Gaussian conditional,
diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index a6fe955eb3..82828bb41e 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -233,18 +233,18 @@ continuousElimination(const HybridGaussianFactorGraph &factors,

 /* ************************************************************************ */
 /**
- * @brief Exponentiate log-values, not necessarily normalized, normalize, and
- * return as AlgebraicDecisionTree.
+ * @brief Exponentiate (not necessarily normalized) negative log-values,
+ * normalize, and then return as AlgebraicDecisionTree.
  *
  * @param logValues DecisionTree of (unnormalized) log values.
 * @return AlgebraicDecisionTree
 */
-static AlgebraicDecisionTree<Key> probabilitiesFromLogValues(
+static AlgebraicDecisionTree<Key> probabilitiesFromNegativeLogValues(
     const AlgebraicDecisionTree<Key> &logValues) {
   // Perform normalization
-  double max_log = logValues.max();
+  double min_log = logValues.min();
   AlgebraicDecisionTree<Key> probabilities = DecisionTree<Key, double>(
-      logValues, [&max_log](const double x) { return exp(x - max_log); });
+      logValues, [&min_log](const double x) { return exp(-(x - min_log)); });
   probabilities = probabilities.normalize(probabilities.sum());

   return probabilities;
@@ -265,13 +265,13 @@ discreteElimination(const HybridGaussianFactorGraph &factors,
       auto logProbability =
           [&](const GaussianFactor::shared_ptr &factor) -> double {
         if (!factor) return 0.0;
-        return -factor->error(VectorValues());
+        return factor->error(VectorValues());
       };
       AlgebraicDecisionTree<Key> logProbabilities =
           DecisionTree<Key, double>(gmf->factors(), logProbability);

       AlgebraicDecisionTree<Key> probabilities =
-          probabilitiesFromLogValues(logProbabilities);
+          probabilitiesFromNegativeLogValues(logProbabilities);
       dfg.emplace_shared<DecisionTreeFactor>(gmf->discreteKeys(), probabilities);

@@ -321,23 +321,23 @@ using Result = std::pair,
 static std::shared_ptr<Factor> createDiscreteFactor(
     const DecisionTree<Key, Result> &eliminationResults,
     const DiscreteKeys &discreteSeparator) {
-  auto logProbability = [&](const Result &pair) -> double {
+  auto negLogProbability = [&](const Result &pair) -> double {
     const auto &[conditional, factor] = pair;
     static const VectorValues kEmpty;
     // If the factor is not null, it has no keys, just contains the residual.
     if (!factor) return 1.0;  // TODO(dellaert): not loving this.

-    // Logspace version of:
+    // Negative logspace version of:
     // exp(-factor->error(kEmpty)) / conditional->normalizationConstant();
-    // We take negative of the logNormalizationConstant `log(k)`
-    // to get `log(1/k) = log(\sqrt{|2πΣ|})`.
+    // negLogConstant gives `-log(k)`
+    // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`.
+    return factor->error(kEmpty) - conditional->negLogConstant();
  };

-  AlgebraicDecisionTree<Key> logProbabilities(
-      DecisionTree<Key, double>(eliminationResults, logProbability));
+  AlgebraicDecisionTree<Key> negLogProbabilities(
+      DecisionTree<Key, double>(eliminationResults, negLogProbability));
   AlgebraicDecisionTree<Key> probabilities =
-      probabilitiesFromLogValues(logProbabilities);
+      probabilitiesFromNegativeLogValues(negLogProbabilities);

   return std::make_shared<DecisionTreeFactor>(discreteSeparator, probabilities);
 }
@@ -355,8 +355,9 @@ static std::shared_ptr<Factor> createHybridGaussianFactor(
       auto hf = std::dynamic_pointer_cast<HessianFactor>(factor);
       if (!hf) throw std::runtime_error("Expected HessianFactor!");
       // Add 2.0 term since the constant term will be premultiplied by 0.5
-      // as per the Hessian definition
-      hf->constantTerm() += 2.0 * conditional->logNormalizationConstant();
+      // as per the Hessian definition,
+      // and negative since we want log(k)
+      hf->constantTerm() += -2.0 * conditional->negLogConstant();
     }
     return {factor, 0.0};
   };
diff --git a/gtsam/hybrid/hybrid.i b/gtsam/hybrid/hybrid.i
index ef179eb9d8..82881ac478 100644
--- a/gtsam/hybrid/hybrid.i
+++ b/gtsam/hybrid/hybrid.i
@@ -61,7 +61,7 @@ virtual class HybridConditional {
   size_t nrParents() const;

   // Standard interface:
-  double logNormalizationConstant() const;
+  double negLogConstant() const;
   double logProbability(const gtsam::HybridValues& values) const;
   double evaluate(const gtsam::HybridValues& values) const;
   double operator()(const gtsam::HybridValues& values) const;
diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
index dde493685b..adcf6c70fd 100644
--- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
@@ -61,11 +61,11 @@ const HybridGaussianConditional hybrid_conditional({Z(0)}, {X(0)}, mode,
 TEST(HybridGaussianConditional, Invariants) {
   using namespace equal_constants;

-  // Check that the conditional normalization constant is the max of all
-  // constants which are all equal, in this case, hence:
-  const double K = hybrid_conditional.logNormalizationConstant();
-  EXPECT_DOUBLES_EQUAL(K, conditionals[0]->logNormalizationConstant(), 1e-8);
-  EXPECT_DOUBLES_EQUAL(K, conditionals[1]->logNormalizationConstant(), 1e-8);
+  // Check that the conditional (negative log) normalization constant is the min
+  // of all constants which are all equal, in this case, hence:
+  const double K = hybrid_conditional.negLogConstant();
+  EXPECT_DOUBLES_EQUAL(K, conditionals[0]->negLogConstant(), 1e-8);
+  EXPECT_DOUBLES_EQUAL(K, conditionals[1]->negLogConstant(), 1e-8);

   EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv0));
   EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv1));
@@ -180,15 +180,16 @@ TEST(HybridGaussianConditional, Error2) {

   // Check result.
   DiscreteKeys discrete_keys{mode};
-  double logNormalizer0 = -conditionals[0]->logNormalizationConstant();
-  double logNormalizer1 = -conditionals[1]->logNormalizationConstant();
-  double minLogNormalizer = std::min(logNormalizer0, logNormalizer1);
+  double negLogConstant0 = conditionals[0]->negLogConstant();
+  double negLogConstant1 = conditionals[1]->negLogConstant();
+  double minErrorConstant = std::min(negLogConstant0, negLogConstant1);

-  // Expected error is e(X) + log(|2πΣ|).
-  // We normalize log(|2πΣ|) with min(logNormalizers) so it is non-negative.
+  // Expected error is e(X) + log(sqrt(|2πΣ|)).
+  // We normalize log(sqrt(|2πΣ|)) with min(negLogConstant)
+  // so it is non-negative.
   std::vector<double> leaves = {
-      conditionals[0]->error(vv) + logNormalizer0 - minLogNormalizer,
-      conditionals[1]->error(vv) + logNormalizer1 - minLogNormalizer};
+      conditionals[0]->error(vv) + negLogConstant0 - minErrorConstant,
+      conditionals[1]->error(vv) + negLogConstant1 - minErrorConstant};
   AlgebraicDecisionTree<Key> expected(discrete_keys, leaves);

   EXPECT(assert_equal(expected, actual, 1e-6));
@@ -196,9 +197,9 @@ TEST(HybridGaussianConditional, Error2) {
   // Check for non-tree version.
   for (size_t mode : {0, 1}) {
     const HybridValues hv{vv, {{M(0), mode}}};
-    EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) -
-                             conditionals[mode]->logNormalizationConstant() -
-                             minLogNormalizer,
+    EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) +
+                             conditionals[mode]->negLogConstant() -
+                             minErrorConstant,
                          hybrid_conditional.error(hv), 1e-8);
   }
 }
@@ -230,8 +231,8 @@ TEST(HybridGaussianConditional, Likelihood2) {
   CHECK(jf1->rows() == 2);

   // Check that the constant C1 is properly encoded in the JacobianFactor.
-  const double C1 = hybrid_conditional.logNormalizationConstant() -
-                    conditionals[1]->logNormalizationConstant();
+  const double C1 =
+      conditionals[1]->negLogConstant() - hybrid_conditional.negLogConstant();
   const double c1 = std::sqrt(2.0 * C1);
   Vector expected_unwhitened(2);
   expected_unwhitened << 4.9 - 5.0, -c1;
diff --git a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp
index cd12d4f109..4b664b8b4a 100644
--- a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp
@@ -780,9 +780,8 @@ static HybridGaussianFactorGraph CreateFactorGraph(
   // Create HybridGaussianFactor
   // We take negative since we want
   // the underlying scalar to be log(\sqrt(|2πΣ|))
-  std::vector<GaussianFactorValuePair> factors{
-      {f0, -model0->logNormalizationConstant()},
-      {f1, -model1->logNormalizationConstant()}};
+  std::vector<GaussianFactorValuePair> factors{{f0, model0->negLogConstant()},
+                                               {f1, model1->negLogConstant()}};
   HybridGaussianFactor motionFactor({X(0), X(1)}, m1, factors);

   HybridGaussianFactorGraph hfg;
diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp
index 347cc5f1fe..495444c79d 100644
--- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp
+++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp
@@ -902,9 +902,8 @@ static HybridNonlinearFactorGraph CreateFactorGraph(
   // Create HybridNonlinearFactor
   // We take negative since we want
   // the underlying scalar to be log(\sqrt(|2πΣ|))
-  std::vector<NonlinearFactorValuePair> factors{
-      {f0, -model0->logNormalizationConstant()},
-      {f1, -model1->logNormalizationConstant()}};
+  std::vector<NonlinearFactorValuePair> factors{{f0, model0->negLogConstant()},
+                                                {f1, model1->negLogConstant()}};

   HybridNonlinearFactor mixtureFactor({X(0), X(1)}, m1, factors);

diff --git a/gtsam/inference/Conditional-inst.h b/gtsam/inference/Conditional-inst.h
index 9377e8cc49..c21a75d265 100644
--- a/gtsam/inference/Conditional-inst.h
+++ b/gtsam/inference/Conditional-inst.h
@@ -59,16 +59,8 @@ double Conditional<FACTOR, DERIVEDCONDITIONAL>::evaluate(

 /* ************************************************************************* */
 template <class FACTOR, class DERIVEDCONDITIONAL>
-double Conditional<FACTOR, DERIVEDCONDITIONAL>::logNormalizationConstant()
-    const {
-  throw std::runtime_error(
-      "Conditional::logNormalizationConstant is not implemented");
-}
-
-/* ************************************************************************* */
-template <class FACTOR, class DERIVEDCONDITIONAL>
-double Conditional<FACTOR, DERIVEDCONDITIONAL>::normalizationConstant() const {
-  return std::exp(logNormalizationConstant());
+double Conditional<FACTOR, DERIVEDCONDITIONAL>::negLogConstant() const {
+  throw std::runtime_error("Conditional::negLogConstant is not implemented");
 }

 /* ************************************************************************* */
@@ -83,13 +75,9 @@ bool Conditional<FACTOR, DERIVEDCONDITIONAL>::CheckInvariants(
   const double logProb = conditional.logProbability(values);
   if (std::abs(prob_or_density - std::exp(logProb)) > 1e-9)
     return false;  // logProb is not consistent with prob_or_density
-  if (std::abs(conditional.logNormalizationConstant() -
-               std::log(conditional.normalizationConstant())) > 1e-9)
-    return false;  // log normalization constant is not consistent with
-                   // normalization constant
   const double error = conditional.error(values);
   if (error < 0.0) return false;  // prob_or_density is negative.
-  const double expected = conditional.logNormalizationConstant() - error;
+  const double expected = -(conditional.negLogConstant() + error);
   if (std::abs(logProb - expected) > 1e-9)
     return false;  // logProb is not consistent with error
   return true;
diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h
index c76c05ab15..f9da36b7bc 100644
--- a/gtsam/inference/Conditional.h
+++ b/gtsam/inference/Conditional.h
@@ -34,11 +34,13 @@ namespace gtsam {
   * `logProbability` is the main methods that need to be implemented in derived
   * classes. These two methods relate to the `error` method in the factor by:
   *   probability(x) = k exp(-error(x))
-  * where k is a normalization constant making \int probability(x) == 1.0, and
-  *   logProbability(x) = K - error(x)
-  * i.e., K = log(K). The normalization constant K is assumed to *not* depend
+  * where k is a normalization constant making
+  *   \int probability(x) = \int k exp(-error(x)) == 1.0, and
+  *   logProbability(x) = -(K + error(x))
+  * i.e., K = -log(k). The normalization constant k is assumed to *not* depend
   * on any argument, only (possibly) on the conditional parameters.
-  * This class provides a default logNormalizationConstant() == 0.0.
+  * This class provides a default negative log normalization constant
+  * negLogConstant() == 0.0.
   *
   * There are four broad classes of conditionals that derive from Conditional:
   *
@@ -163,13 +165,12 @@ namespace gtsam {
     }

     /**
-     * All conditional types need to implement a log normalization constant to
-     * make it such that error>=0.
+     * @brief All conditional types need to implement this as the negative log
+     * of the normalization constant to make it such that error>=0.
+     *
+     * @return double
      */
-    virtual double logNormalizationConstant() const;
-
-    /** Non-virtual, exponentiate logNormalizationConstant. */
-    double normalizationConstant() const;
+    virtual double negLogConstant() const;

     /// @}
     /// @name Advanced Interface
@@ -208,9 +209,9 @@ namespace gtsam {
     * - evaluate >= 0.0
     * - evaluate(x) == conditional(x)
     * - exp(logProbability(x)) == evaluate(x)
-    * - logNormalizationConstant() = log(normalizationConstant())
+    * - negLogConstant() = -log(k), with k the normalization constant
    * - error >= 0.0
-    * - logProbability(x) == logNormalizationConstant() - error(x)
+    * - logProbability(x) == -(negLogConstant() + error(x))
    *
    * @param conditional The conditional to test, as a reference to the derived type.
    * @tparam VALUES HybridValues, or a more narrow type like DiscreteValues.
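The invariants above are easy to sanity-check through the Python wrapper once this patch is in. The sketch below is illustrative only, not part of the patch: it assumes a gtsam build that includes the linear.i change in this patch, and the key, dimension, and tolerance are made up for the example.

```python
# Minimal sketch: check negLogConstant() against the closed form and the
# logProbability/error invariant, for a 3D Gaussian conditional p(x0).
import math

import numpy as np
import gtsam

sigma = 2.0
key = gtsam.symbol('x', 0)
conditional = gtsam.GaussianConditional.FromMeanAndStddev(
    key, np.zeros(3), sigma)

x = gtsam.VectorValues()
x.insert(key, np.zeros(3))

# negLogConstant() == -log(k) == 0.5*n*log(2*pi) + 0.5*log(det(Sigma)).
n = 3
neg_log_k = conditional.negLogConstant()
assert abs(neg_log_k - 0.5 * n * math.log(2 * math.pi * sigma**2)) < 1e-9

# error(x) >= 0, and logProbability(x) == -(negLogConstant() + error(x)).
assert conditional.error(x) >= 0.0
expected = -(neg_log_k + conditional.error(x))
assert abs(conditional.logProbability(x) - expected) < 1e-9
```

Working in negative log space keeps everything a minimization, which is why the hybrid code below takes the min over the component constants.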
diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp
index 861e19cc9e..f8c4ddc4c2 100644
--- a/gtsam/linear/GaussianBayesNet.cpp
+++ b/gtsam/linear/GaussianBayesNet.cpp
@@ -243,23 +243,24 @@ namespace gtsam {
   }

   /* ************************************************************************* */
-  double GaussianBayesNet::logNormalizationConstant() const {
+  double GaussianBayesNet::negLogConstant() const {
    /*
      normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma))
-     logConstant = -0.5 * n*log(2*pi) - 0.5 * log det(Sigma)
-
-     log det(Sigma)) = -2.0 * logDeterminant()
-     thus, logConstant = -0.5*n*log(2*pi) + logDeterminant()
-
-     BayesNet logConstant = sum(-0.5*n_i*log(2*pi) + logDeterminant_i())
-                          = sum(-0.5*n_i*log(2*pi)) + sum(logDeterminant_i())
-                          = sum(-0.5*n_i*log(2*pi)) + bn->logDeterminant()
+     negLogConstant = -log(normalizationConstant)
+       = 0.5 * n*log(2*pi) + 0.5 * log(det(Sigma))
+
+     log(det(Sigma)) = -2.0 * logDeterminant()
+     thus, negLogConstant = 0.5*n*log(2*pi) - logDeterminant()
+
+     BayesNet negLogConstant = sum(0.5*n_i*log(2*pi) - logDeterminant_i())
+       = sum(0.5*n_i*log(2*pi)) - sum(logDeterminant_i())
+       = sum(0.5*n_i*log(2*pi)) - bn->logDeterminant()
    */
-    double logNormConst = 0.0;
+    double negLogNormConst = 0.0;
    for (const sharedConditional& cg : *this) {
-      logNormConst += cg->logNormalizationConstant();
+      negLogNormConst += cg->negLogConstant();
    }
-    return logNormConst;
+    return negLogNormConst;
  }

  /* ************************************************************************* */
diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h
index ea1cb8603c..e858bbe43d 100644
--- a/gtsam/linear/GaussianBayesNet.h
+++ b/gtsam/linear/GaussianBayesNet.h
@@ -235,12 +235,12 @@ namespace gtsam {
    double logDeterminant() const;

    /**
-     * @brief Get the log of the normalization constant corresponding to the
-     * joint Gaussian density represented by this Bayes net.
+     * @brief Get the negative log of the normalization constant corresponding
+     * to the joint Gaussian density represented by this Bayes net.
     *
     * @return double
     */
-    double logNormalizationConstant() const;
+    double negLogConstant() const;

    /**
     * Backsubstitute with a different RHS vector than the one stored in this BayesNet.
diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp
index 585bbdb331..7735e5129b 100644
--- a/gtsam/linear/GaussianConditional.cpp
+++ b/gtsam/linear/GaussianConditional.cpp
@@ -121,7 +121,7 @@ namespace gtsam {
      const auto mean = solve({});  // solve for mean.
      mean.print(" mean", formatter);
    }
-    cout << " logNormalizationConstant: " << logNormalizationConstant() << endl;
+    cout << " logNormalizationConstant: " << -negLogConstant() << endl;
    if (model_)
      model_->print(" Noise model: ");
    else
@@ -181,24 +181,24 @@ namespace gtsam {

  /* ************************************************************************* */
  //  normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma))
-  //  log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma)
-  double GaussianConditional::logNormalizationConstant() const {
+  //  neg-log = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma)
+  double GaussianConditional::negLogConstant() const {
    constexpr double log2pi = 1.8378770664093454835606594728112;
    size_t n = d().size();
    // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1}
    // log det(Sigma) = -log(det(R'R)) = -2*log(det(R))
    // Hence, log det(Sigma)) = -2.0 * logDeterminant()
-    // which gives log = -0.5*n*log(2*pi) - 0.5*(-2.0 * logDeterminant())
-    //                 = -0.5*n*log(2*pi) + (0.5*2.0 * logDeterminant())
-    //                 = -0.5*n*log(2*pi) + logDeterminant()
-    return -0.5 * n * log2pi + logDeterminant();
+    // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDeterminant())
+    //                     = 0.5*n*log(2*pi) - (0.5*2.0 * logDeterminant())
+    //                     = 0.5*n*log(2*pi) - logDeterminant()
+    return 0.5 * n * log2pi - logDeterminant();
  }

  /* ************************************************************************* */
  //  density = k exp(-error(x))
  //  log = log(k) - error(x)
  double GaussianConditional::logProbability(const VectorValues& x) const {
-    return logNormalizationConstant() - error(x);
+    return -(negLogConstant() + error(x));
  }

  double GaussianConditional::logProbability(const HybridValues& x) const {
diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h
index 420efabca9..27270fece1 100644
--- a/gtsam/linear/GaussianConditional.h
+++ b/gtsam/linear/GaussianConditional.h
@@ -133,10 +133,14 @@ namespace gtsam {
    /// @{

    /**
-     * normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma))
-     * log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma)
+     * @brief Return the negative log of the normalization constant.
+     *
+     * normalization constant k = 1.0 / sqrt((2*pi)^n*det(Sigma))
+     * -log(k) = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma)
+     *
+     * @return double
     */
-    double logNormalizationConstant() const override;
+    double negLogConstant() const override;

    /**
     * Calculate log-probability log(evaluate(x)) for given values `x`:
diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp
index de69fdce97..1236f1b946 100644
--- a/gtsam/linear/NoiseModel.cpp
+++ b/gtsam/linear/NoiseModel.cpp
@@ -255,18 +255,17 @@ double Gaussian::logDeterminant() const {
 }

 /* *******************************************************************************/
-double Gaussian::logNormalizationConstant() const {
+double Gaussian::negLogConstant() const {
   // log(det(Sigma)) = -2.0 * logDetR
-  // which gives log = -0.5*n*log(2*pi) - 0.5*(-2.0 * logDetR())
-  //                 = -0.5*n*log(2*pi) + (0.5*2.0 * logDetR())
-  //                 = -0.5*n*log(2*pi) + logDetR()
+  // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDetR())
+  //                     = 0.5*n*log(2*pi) - (0.5*2.0 * logDetR())
+  //                     = 0.5*n*log(2*pi) - logDetR()
   size_t n = dim();
   constexpr double log2pi = 1.8378770664093454835606594728112;
-  // Get 1/log(\sqrt(|2pi Sigma|)) = -0.5*log(|2pi Sigma|)
-  return -0.5 * n * log2pi + logDetR();
+  // Get -log(1/\sqrt(|2pi Sigma|)) = 0.5*log(|2pi Sigma|)
+  return 0.5 * n * log2pi - logDetR();
 }
-
 /* ************************************************************************* */
 // Diagonal
 /* ************************************************************************* */
diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h
index 91c11d4101..34fa63e4cc 100644
--- a/gtsam/linear/NoiseModel.h
+++ b/gtsam/linear/NoiseModel.h
@@ -272,14 +272,12 @@ namespace gtsam {
    double logDeterminant() const;

    /**
-     * @brief Method to compute the normalization constant
-     * for a Gaussian noise model k = \sqrt(1/|2πΣ|).
-     * We compute this in the log-space for numerical accuracy,
-     * thus returning log(k).
-     *
-     * @return double
-     */
-    double logNormalizationConstant() const;
+     * @brief Compute the negative log of the normalization constant
+     * for a Gaussian noise model k = 1/\sqrt(|2πΣ|).
+     *
+     * @return double
+     */
+    double negLogConstant() const;

   private:
#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i
index 9c448de50b..af6c2ee225 100644
--- a/gtsam/linear/linear.i
+++ b/gtsam/linear/linear.i
@@ -548,7 +548,7 @@ virtual class GaussianConditional : gtsam::JacobianFactor {
  bool equals(const gtsam::GaussianConditional& cg, double tol) const;

  // Standard Interface
-  double logNormalizationConstant() const;
+  double negLogConstant() const;
  double logProbability(const gtsam::VectorValues& x) const;
  double evaluate(const gtsam::VectorValues& x) const;
  double error(const gtsam::VectorValues& x) const;
diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp
index 99453ee4ef..b1cfbe12ca 100644
--- a/gtsam/linear/tests/testGaussianBayesNet.cpp
+++ b/gtsam/linear/tests/testGaussianBayesNet.cpp
@@ -76,12 +76,11 @@ TEST(GaussianBayesNet, Evaluate1) {
  // the normalization constant 1.0/sqrt((2*pi*Sigma).det()).
  // The covariance matrix inv(Sigma) = R'*R, so the determinant is
  const double constant = sqrt((invSigma / (2 * M_PI)).determinant());
-  EXPECT_DOUBLES_EQUAL(log(constant),
-                       smallBayesNet.at(0)->logNormalizationConstant() +
-                           smallBayesNet.at(1)->logNormalizationConstant(),
-                       1e-9);
-  EXPECT_DOUBLES_EQUAL(log(constant), smallBayesNet.logNormalizationConstant(),
+  EXPECT_DOUBLES_EQUAL(-log(constant),
+                       smallBayesNet.at(0)->negLogConstant() +
+                           smallBayesNet.at(1)->negLogConstant(),
                       1e-9);
+  EXPECT_DOUBLES_EQUAL(-log(constant), smallBayesNet.negLogConstant(), 1e-9);
  const double actual = smallBayesNet.evaluate(mean);
  EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9);
}
diff --git a/gtsam/linear/tests/testGaussianConditional.cpp b/gtsam/linear/tests/testGaussianConditional.cpp
index dcd8218894..68894c3142 100644
--- a/gtsam/linear/tests/testGaussianConditional.cpp
+++ b/gtsam/linear/tests/testGaussianConditional.cpp
@@ -486,16 +486,17 @@ TEST(GaussianConditional, Error) {

/* ************************************************************************* */
// Similar test for multivariate gaussian but with sigma 2.0
-TEST(GaussianConditional, LogNormalizationConstant) {
+TEST(GaussianConditional, NegLogConstant) {
  double sigma = 2.0;
  auto conditional = GaussianConditional::FromMeanAndStddev(X(0), Vector3::Zero(), sigma);
  VectorValues x;
  x.insert(X(0), Vector3::Zero());
  Matrix3 Sigma = I_3x3 * sigma * sigma;
-  double expectedLogNormalizingConstant = log(1 / sqrt((2 * M_PI * Sigma).determinant()));
+  double expectedNegLogConstant =
+      -log(1 / sqrt((2 * M_PI * Sigma).determinant()));

-  EXPECT_DOUBLES_EQUAL(expectedLogNormalizingConstant,
-                       conditional.logNormalizationConstant(), 1e-9);
+  EXPECT_DOUBLES_EQUAL(expectedNegLogConstant, conditional.negLogConstant(),
+                       1e-9);
}

/* ************************************************************************* */
diff --git a/gtsam/linear/tests/testGaussianDensity.cpp b/gtsam/linear/tests/testGaussianDensity.cpp
index 97eb0de704..b2861e833e 100644
--- a/gtsam/linear/tests/testGaussianDensity.cpp
+++ b/gtsam/linear/tests/testGaussianDensity.cpp
@@ -55,7 +55,7 @@ TEST(GaussianDensity, FromMeanAndStddev) {
  double expected1 = 0.5 * e.dot(e);
  EXPECT_DOUBLES_EQUAL(expected1, density.error(values), 1e-9);

-  double expected2 = density.logNormalizationConstant()- 0.5 * e.dot(e);
+  double expected2 = -(density.negLogConstant() + 0.5 * e.dot(e));
  EXPECT_DOUBLES_EQUAL(expected2, density.logProbability(values), 1e-9);
}

diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp
index 6aea154ee4..2518e4c497 100644
--- a/gtsam/linear/tests/testNoiseModel.cpp
+++ b/gtsam/linear/tests/testNoiseModel.cpp
@@ -807,50 +807,50 @@ TEST(NoiseModel, NonDiagonalGaussian)
  }
}

-TEST(NoiseModel, LogNormalizationConstant1D) {
+TEST(NoiseModel, NegLogNormalizationConstant1D) {
  // Very simple 1D noise model, which we can compute by hand.
  double sigma = 0.1;
-  // For expected values, we compute 1/log(sqrt(|2πΣ|)) by hand.
-  // = -0.5*(log(2π) + log(Σ)) (since it is 1D)
-  double expected_value = -0.5 * log(2 * M_PI * sigma * sigma);
+  // For expected values, we compute -log(1/sqrt(|2πΣ|)) by hand.
+  // = 0.5*(log(2π) + log(Σ)) (since it is 1D)
+  double expected_value = 0.5 * log(2 * M_PI * sigma * sigma);

  // Gaussian
  {
    Matrix11 R;
    R << 1 / sigma;
    auto noise_model = Gaussian::SqrtInformation(R);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
    EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
  }
  // Diagonal
  {
    auto noise_model = Diagonal::Sigmas(Vector1(sigma));
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
    EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
  }
  // Isotropic
  {
    auto noise_model = Isotropic::Sigma(1, sigma);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
    EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
  }
  // Unit
  {
    auto noise_model = Unit::Create(1);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
    double sigma = 1.0;
-    expected_value = -0.5 * log(2 * M_PI * sigma * sigma);
+    expected_value = 0.5 * log(2 * M_PI * sigma * sigma);
    EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
  }
}

-TEST(NoiseModel, LogNormalizationConstant3D) {
+TEST(NoiseModel, NegLogNormalizationConstant3D) {
  // Simple 3D noise model, which we can compute by hand.
  double sigma = 0.1;
  size_t n = 3;
-  // We compute the expected values just like in the LogNormalizationConstant1D
+  // We compute the expected values just like in the NegLogNormalizationConstant1D
  // test, but we multiply by 3 due to the determinant.
-  double expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma);
+  double expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma);

  // Gaussian
  {
    Matrix3 R;
    R << 1 / sigma, 0, 0,  //
        0, 1 / sigma, 4,   //
        0, 0, 1 / sigma;
    auto noise_model = Gaussian::SqrtInformation(R);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
    EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
  }
  // Diagonal
  {
    auto noise_model = Diagonal::Sigmas(Vector3(sigma, sigma, sigma));
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
    EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
  }
  // Isotropic
  {
    auto noise_model = Isotropic::Sigma(n, sigma);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
    EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
  }
  // Unit
  {
    auto noise_model = Unit::Create(3);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
    double sigma = 1.0;
-    expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma);
+    expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma);
    EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
  }
}
diff --git a/python/gtsam/tests/test_HybridBayesNet.py b/python/gtsam/tests/test_HybridBayesNet.py
index 2f7dd4a987..bf2b6a0330 100644
--- a/python/gtsam/tests/test_HybridBayesNet.py
+++ b/python/gtsam/tests/test_HybridBayesNet.py
@@ -90,8 +90,7 @@ def check_invariance(self, conditional, values):
        self.assertTrue(probability >= 0.0)
        logProb = conditional.logProbability(values)
        self.assertAlmostEqual(probability, np.exp(logProb))
-        expected = conditional.logNormalizationConstant() - \
-            conditional.error(values)
+        expected = -(conditional.negLogConstant() + conditional.error(values))
        self.assertAlmostEqual(logProb, expected)

diff --git a/python/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py
index a47c5ad620..1b4209509e 100644
--- a/python/gtsam/tests/test_NonlinearOptimizer.py
+++ b/python/gtsam/tests/test_NonlinearOptimizer.py
@@ -14,15 +14,16 @@

import unittest

-import gtsam
-from gtsam import (
-    DoglegOptimizer, DoglegParams, DummyPreconditionerParameters, GaussNewtonOptimizer,
-    GaussNewtonParams, GncLMParams, GncLossType, GncLMOptimizer, LevenbergMarquardtOptimizer,
-    LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2,
-    PriorFactorPoint2, Values
-)
from gtsam.utils.test_case import GtsamTestCase

+import gtsam
+from gtsam import (DoglegOptimizer, DoglegParams,
+                   DummyPreconditionerParameters, GaussNewtonOptimizer,
+                   GaussNewtonParams, GncLMOptimizer, GncLMParams, GncLossType,
+                   LevenbergMarquardtOptimizer, LevenbergMarquardtParams,
+                   NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2,
+                   PriorFactorPoint2, Values)
+
KEY1 = 1
KEY2 = 2

@@ -136,7 +137,7 @@ def test_gnc_params(self):
        # Test optimizer params
        optimizer = GncLMOptimizer(self.fg, self.initial_values, params)
        for ict_factor in (0.9, 1.1):
-            new_ict = ict_factor * optimizer.getInlierCostThresholds()
+            new_ict = ict_factor * optimizer.getInlierCostThresholds().item()
            optimizer.setInlierCostThresholds(new_ict)
            self.assertAlmostEqual(optimizer.getInlierCostThresholds(), new_ict)
        for w_factor in (0.8, 0.9):
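On the discrete side, negLogConstant() is identically zero, so error(x) reduces to -log P(x). A quick illustrative counterpart to the Gaussian check earlier, under the same assumptions (a build with this patch's discrete.i change; the binary key and the "4/6" spec are made up for the example):

```python
# Discrete counterpart: negLogConstant() == 0, hence error(x) == -log P(x).
import math

import gtsam

A = (0, 2)  # DiscreteKey: (key, cardinality), a binary variable
prior = gtsam.DiscreteConditional(A, "4/6")  # P(A=0)=0.4, P(A=1)=0.6

assert prior.negLogConstant() == 0.0

values = gtsam.DiscreteValues()
values[0] = 1
p = prior.evaluate(values)  # 0.6
assert abs(prior.error(values) + math.log(p)) < 1e-9
```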