diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml index ef7d7723d9..96e7174aea 100644 --- a/.github/workflows/build-special.yml +++ b/.github/workflows/build-special.yml @@ -111,7 +111,7 @@ jobs: if: matrix.flag == 'deprecated' run: | echo "GTSAM_ALLOW_DEPRECATED_SINCE_V42=ON" >> $GITHUB_ENV - echo "Allow deprecated since version 4.1" + echo "Allow deprecated since version 4.2" - name: Set Use Quaternions Flag if: matrix.flag == 'quaternions' diff --git a/CMakeLists.txt b/CMakeLists.txt index b81479cb84..5bad53988e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -10,7 +10,7 @@ endif() set (GTSAM_VERSION_MAJOR 4) set (GTSAM_VERSION_MINOR 2) set (GTSAM_VERSION_PATCH 0) -set (GTSAM_PRERELEASE_VERSION "a8") +set (GTSAM_PRERELEASE_VERSION "a9") math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}") if (${GTSAM_VERSION_PATCH} EQUAL 0) diff --git a/README.md b/README.md index b32ce70e07..25b3b98ea1 100644 --- a/README.md +++ b/README.md @@ -31,11 +31,11 @@ In the root library folder execute: ```sh #!bash -$ mkdir build -$ cd build -$ cmake .. -$ make check (optional, runs unit tests) -$ make install +mkdir build +cd build +cmake .. +make check (optional, runs unit tests) +make install ``` Prerequisites: @@ -55,9 +55,7 @@ Optional prerequisites - used automatically if findable by CMake: GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. It also introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect. -GTSAM 4 also deprecated some legacy functionality and wrongly named methods. 
If you are on a 4.0.X release, you can define the flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4` to use the deprecated methods. - -GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42` for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile. + There is a flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42` for newly deprecated methods since the 4.2 release, which is on by default, allowing anyone to just pull version 4.2 and compile. ## Wrappers @@ -68,24 +66,39 @@ We provide support for [MATLAB](matlab/README.md) and [Python](python/README.md) If you are using GTSAM for academic work, please use the following citation: -``` +```bibtex @software{gtsam, - author = {Frank Dellaert and Richard Roberts and Varun Agrawal and Alex Cunningham and Chris Beall and Duy-Nguyen Ta and Fan Jiang and lucacarlone and nikai and Jose Luis Blanco-Claraco and Stephen Williams and ydjian and John Lambert and Andy Melim and Zhaoyang Lv and Akshay Krishnan and Jing Dong and Gerry Chen and Krunal Chande and balderdash-devil and DiffDecisionTrees and Sungtae An and mpaluri and Ellon Paiva Mendes and Mike Bosse and Akash Patel and Ayush Baid and Paul Furgale and matthewbroadwaynavenio and roderick-koehle}, + author = {Frank Dellaert and GTSAM Contributors}, title = {borglab/gtsam}, - month = may, + month = May, year = 2022, - publisher = {Zenodo}, - version = {4.2a7}, + publisher = {Georgia Tech Borg Lab}, + version = {4.2a8}, doi = {10.5281/zenodo.5794541}, - url = {https://doi.org/10.5281/zenodo.5794541} + url = {https://github.com/borglab/gtsam)}} } ``` -You can also get the latest citation available from Zenodo below: +To cite the `Factor Graphs for Robot Perception` book, please use: +```bibtex +@book{factor_graphs_for_robot_perception, + author={Frank Dellaert and Michael Kaess}, + year={2017}, + title={Factor Graphs for Robot Perception}, + 
publisher={Foundations and Trends in Robotics, Vol. 6}, + url={http://www.cs.cmu.edu/~kaess/pub/Dellaert17fnt.pdf} +} +``` -[![DOI](https://zenodo.org/badge/86362856.svg)](https://doi.org/10.5281/zenodo.5794541) +If you are using the IMU preintegration scheme, please cite: +```bibtex +@book{imu_preintegration, + author={Christian Forster and Luca Carlone and Frank Dellaert and Davide Scaramuzza}, + title={IMU preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation}, + year={2015} +} +``` -Specific formats are available in the bottom-right corner of the Zenodo page. ## The Preintegrated IMU Factor diff --git a/cmake/GtsamBuildTypes.cmake b/cmake/GtsamBuildTypes.cmake index e63fbf1dd4..b6107e6d82 100644 --- a/cmake/GtsamBuildTypes.cmake +++ b/cmake/GtsamBuildTypes.cmake @@ -1,6 +1,6 @@ include(CheckCXXCompilerFlag) # for check_cxx_compiler_flag() -# Set cmake policy to recognize the AppleClang compiler +# Set cmake policy to recognize the Apple Clang compiler # independently from the Clang compiler. if(POLICY CMP0025) cmake_policy(SET CMP0025 NEW) @@ -87,10 +87,10 @@ if(MSVC) list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE WINDOWS_LEAN_AND_MEAN NOMINMAX - ) + ) # Avoid literally hundreds to thousands of warnings: list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC - /wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data + /wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data ) add_compile_options(/wd4005) @@ -183,19 +183,43 @@ set(CMAKE_EXE_LINKER_FLAGS_PROFILING ${GTSAM_CMAKE_EXE_LINKER_FLAGS_PROFILING}) # Clang uses a template depth that is less than standard and is too small if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang") - # Apple Clang before 5.0 does not support -ftemplate-depth. 
- if(NOT (APPLE AND "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "5.0")) - list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-ftemplate-depth=1024") - endif() + # Apple Clang before 5.0 does not support -ftemplate-depth. + if(NOT (APPLE AND "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "5.0")) + list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-ftemplate-depth=1024") + endif() endif() if (NOT MSVC) option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" OFF) - if(GTSAM_BUILD_WITH_MARCH_NATIVE AND (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")) - # Add as public flag so all dependant projects also use it, as required - # by Eigen to avid crashes due to SIMD vectorization: - list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native") - endif() + if(GTSAM_BUILD_WITH_MARCH_NATIVE) + # Check if Apple OS and compiler is [Apple]Clang + if(APPLE AND (${CMAKE_CXX_COMPILER_ID} MATCHES "^(Apple)?Clang$")) + # Check Clang version since march=native is only supported for version 15.0+. 
+ if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "15.0") + if(NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64") + # Add as public flag so all dependent projects also use it, as required + # by Eigen to avoid crashes due to SIMD vectorization: + list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native") + else() + message(WARNING "Option GTSAM_BUILD_WITH_MARCH_NATIVE ignored, because native architecture is not supported for Apple silicon and AppleClang version < 15.0.") + endif() # CMAKE_SYSTEM_PROCESSOR + else() + # Add as public flag so all dependent projects also use it, as required + # by Eigen to avoid crashes due to SIMD vectorization: + list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native") + endif() # CMAKE_CXX_COMPILER_VERSION + else() + include(CheckCXXCompilerFlag) + CHECK_CXX_COMPILER_FLAG("-march=native" COMPILER_SUPPORTS_MARCH_NATIVE) + if(COMPILER_SUPPORTS_MARCH_NATIVE) + # Add as public flag so all dependent projects also use it, as required + # by Eigen to avoid crashes due to SIMD vectorization: + list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native") + else() + message(WARNING "Option GTSAM_BUILD_WITH_MARCH_NATIVE ignored, because native architecture is not supported.") + endif() # COMPILER_SUPPORTS_MARCH_NATIVE + endif() # APPLE + endif() # GTSAM_BUILD_WITH_MARCH_NATIVE endif() # Set up build type library postfixes diff --git a/cmake/GtsamPrinting.cmake b/cmake/GtsamPrinting.cmake index c686796676..2181652e5a 100644 --- a/cmake/GtsamPrinting.cmake +++ b/cmake/GtsamPrinting.cmake @@ -51,11 +51,10 @@ function(print_build_options_for_target target_name_) # print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE) print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC) - foreach(build_type ${GTSAM_CMAKE_CONFIGURATION_TYPES}) - string(TOUPPER "${build_type}" build_type_toupper) - # print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper}) - print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC_${build_type_toupper}) - # 
print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper}) - print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC_${build_type_toupper}) - endforeach() + string(TOUPPER "${CMAKE_BUILD_TYPE}" build_type_toupper) + # print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper}) + print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC_${build_type_toupper}) + # print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper}) + print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC_${build_type_toupper}) + endfunction() diff --git a/cmake/HandleGeneralOptions.cmake b/cmake/HandleGeneralOptions.cmake index 7c8f8533f3..00ec65ba7c 100644 --- a/cmake/HandleGeneralOptions.cmake +++ b/cmake/HandleGeneralOptions.cmake @@ -25,7 +25,7 @@ option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will a option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF) option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) -option(GTSAM_ALLOW_DEPRECATED_SINCE_V42 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) +option(GTSAM_ALLOW_DEPRECATED_SINCE_V42 "Allow use of methods/functions deprecated in GTSAM 4.2" ON) option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF) diff --git a/cmake/HandlePrintConfiguration.cmake b/cmake/HandlePrintConfiguration.cmake index 04d27c27f4..5b7ef237ac 100644 --- a/cmake/HandlePrintConfiguration.cmake +++ b/cmake/HandlePrintConfiguration.cmake @@ -87,7 +87,7 @@ print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as defaul print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") 
print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") -print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V42} "Allow features deprecated in GTSAM 4.1") +print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V42} "Allow features deprecated in GTSAM 4.2") print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") diff --git a/cmake/HandleTBB.cmake b/cmake/HandleTBB.cmake index 52ee754949..fb944ba5b5 100644 --- a/cmake/HandleTBB.cmake +++ b/cmake/HandleTBB.cmake @@ -7,10 +7,6 @@ if (GTSAM_WITH_TBB) if(TBB_FOUND) set(GTSAM_USE_TBB 1) # This will go into config.h -# if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1")) -# message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.") -# endif() - if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020)) set(TBB_GREATER_EQUAL_2020 1) else() diff --git a/doc/Hybrid.lyx b/doc/Hybrid.lyx new file mode 100644 index 0000000000..c854a4845a --- /dev/null +++ b/doc/Hybrid.lyx @@ -0,0 +1,719 @@ +#LyX 2.3 created this file. 
For more info see http://www.lyx.org/ +\lyxformat 544 +\begin_document +\begin_header +\save_transient_properties true +\origin unavailable +\textclass article +\use_default_options true +\maintain_unincluded_children false +\language english +\language_package default +\inputencoding auto +\fontencoding global +\font_roman "default" "default" +\font_sans "default" "default" +\font_typewriter "default" "default" +\font_math "auto" "auto" +\font_default_family default +\use_non_tex_fonts false +\font_sc false +\font_osf false +\font_sf_scale 100 100 +\font_tt_scale 100 100 +\use_microtype false +\use_dash_ligatures true +\graphics default +\default_output_format default +\output_sync 0 +\bibtex_command default +\index_command default +\paperfontsize 11 +\spacing single +\use_hyperref false +\papersize default +\use_geometry true +\use_package amsmath 1 +\use_package amssymb 1 +\use_package cancel 1 +\use_package esint 1 +\use_package mathdots 1 +\use_package mathtools 1 +\use_package mhchem 1 +\use_package stackrel 1 +\use_package stmaryrd 1 +\use_package undertilde 1 +\cite_engine basic +\cite_engine_type default +\biblio_style plain +\use_bibtopic false +\use_indices false +\paperorientation portrait +\suppress_date false +\justification true +\use_refstyle 1 +\use_minted 0 +\index Index +\shortcut idx +\color #008000 +\end_index +\leftmargin 1in +\topmargin 1in +\rightmargin 1in +\bottommargin 1in +\secnumdepth 3 +\tocdepth 3 +\paragraph_separation indent +\paragraph_indentation default +\is_math_indent 0 +\math_numbering_side default +\quotes_style english +\dynamic_quotes 0 +\papercolumns 1 +\papersides 1 +\paperpagestyle default +\tracking_changes false +\output_changes false +\html_math_output 0 +\html_css_as_file 0 +\html_be_strict false +\end_header + +\begin_body + +\begin_layout Title +Hybrid Inference +\end_layout + +\begin_layout Author +Frank Dellaert & Varun Agrawal +\end_layout + +\begin_layout Date +January 2023 +\end_layout + +\begin_layout Section 
+Hybrid Conditionals +\end_layout + +\begin_layout Standard +Here we develop a hybrid conditional density, on continuous variables (typically + a measurement +\begin_inset Formula $x$ +\end_inset + +), given a mix of continuous variables +\begin_inset Formula $y$ +\end_inset + + and discrete variables +\begin_inset Formula $m$ +\end_inset + +. + We start by reviewing a Gaussian conditional density and its invariants + (relationship between density, error, and normalization constant), and + then work out what needs to happen for a hybrid version. +\end_layout + +\begin_layout Subsubsection* +GaussianConditional +\end_layout + +\begin_layout Standard +A +\emph on +GaussianConditional +\emph default + is a properly normalized, multivariate Gaussian conditional density: +\begin_inset Formula +\[ +P(x|y)=\frac{1}{\sqrt{|2\pi\Sigma|}}\exp\left\{ -\frac{1}{2}\|Rx+Sy-d\|_{\Sigma}^{2}\right\} +\] + +\end_inset + +where +\begin_inset Formula $R$ +\end_inset + + is square and upper-triangular. + For every +\emph on +GaussianConditional +\emph default +, we have the following +\series bold +invariant +\series default +, +\begin_inset Formula +\begin{equation} +\log P(x|y)=K_{gc}-E_{gc}(x,y),\label{eq:gc_invariant} +\end{equation} + +\end_inset + +with the +\series bold +log-normalization constant +\series default + +\begin_inset Formula $K_{gc}$ +\end_inset + + equal to +\begin_inset Formula +\begin{equation} +K_{gc}=\log\frac{1}{\sqrt{|2\pi\Sigma|}}\label{eq:log_constant} +\end{equation} + +\end_inset + + and the +\series bold +error +\series default + +\begin_inset Formula $E_{gc}(x,y)$ +\end_inset + + equal to the negative log-density, up to a constant: +\begin_inset Formula +\begin{equation} +E_{gc}(x,y)=\frac{1}{2}\|Rx+Sy-d\|_{\Sigma}^{2}.\label{eq:gc_error} +\end{equation} + +\end_inset + +. 
+\end_layout + +\begin_layout Subsubsection* +GaussianMixture +\end_layout + +\begin_layout Standard +A +\emph on +GaussianMixture +\emph default + (maybe to be renamed to +\emph on +GaussianMixtureComponent +\emph default +) just indexes into a number of +\emph on +GaussianConditional +\emph default + instances, that are each properly normalized: +\end_layout + +\begin_layout Standard +\begin_inset Formula +\[ +P(x|y,m)=P_{m}(x|y). +\] + +\end_inset + +We store one +\emph on +GaussianConditional +\emph default + +\begin_inset Formula $P_{m}(x|y)$ +\end_inset + + for every possible assignment +\begin_inset Formula $m$ +\end_inset + + to a set of discrete variables. + As +\emph on +GaussianMixture +\emph default + is a +\emph on +Conditional +\emph default +, it needs to satisfy the a similar invariant to +\begin_inset CommandInset ref +LatexCommand eqref +reference "eq:gc_invariant" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +: +\begin_inset Formula +\begin{equation} +\log P(x|y,m)=K_{gm}-E_{gm}(x,y,m).\label{eq:gm_invariant} +\end{equation} + +\end_inset + +If we take the log of +\begin_inset Formula $P(x|y,m)$ +\end_inset + + we get +\begin_inset Formula +\begin{equation} +\log P(x|y,m)=\log P_{m}(x|y)=K_{gcm}-E_{gcm}(x,y).\label{eq:gm_log} +\end{equation} + +\end_inset + +Equating +\begin_inset CommandInset ref +LatexCommand eqref +reference "eq:gm_invariant" +plural "false" +caps "false" +noprefix "false" + +\end_inset + + and +\begin_inset CommandInset ref +LatexCommand eqref +reference "eq:gm_log" +plural "false" +caps "false" +noprefix "false" + +\end_inset + + we see that this can be achieved by defining the error +\begin_inset Formula $E_{gm}(x,y,m)$ +\end_inset + + as +\begin_inset Formula +\begin{equation} +E_{gm}(x,y,m)=E_{gcm}(x,y)+K_{gm}-K_{gcm}\label{eq:gm_error} +\end{equation} + +\end_inset + +where choose +\begin_inset Formula $K_{gm}=\max K_{gcm}$ +\end_inset + +, as then the error will always be positive. 
+\end_layout + +\begin_layout Section +Hybrid Factors +\end_layout + +\begin_layout Standard +In GTSAM, we typically condition on known measurements, and factors encode + the resulting negative log-likelihood of the unknown variables +\begin_inset Formula $y$ +\end_inset + + given the measurements +\begin_inset Formula $x$ +\end_inset + +. + We review how a Gaussian conditional density is converted into a Gaussian + factor, and then develop a hybrid version satisfying the correct invariants + as well. +\end_layout + +\begin_layout Subsubsection* +JacobianFactor +\end_layout + +\begin_layout Standard +A +\emph on +JacobianFactor +\emph default + typically results from a +\emph on +GaussianConditional +\emph default + by having known values +\begin_inset Formula $\bar{x}$ +\end_inset + + for the +\begin_inset Quotes eld +\end_inset + +measurement +\begin_inset Quotes erd +\end_inset + + +\begin_inset Formula $x$ +\end_inset + +: +\begin_inset Formula +\begin{equation} +L(y)\propto P(\bar{x}|y)\label{eq:likelihood} +\end{equation} + +\end_inset + +In GTSAM factors represent the negative log-likelihood +\begin_inset Formula $E_{jf}(y)$ +\end_inset + + and hence we have +\begin_inset Formula +\[ +E_{jf}(y)=-\log L(y)=C-\log P(\bar{x}|y), +\] + +\end_inset + +with +\begin_inset Formula $C$ +\end_inset + + the log of the proportionality constant in +\begin_inset CommandInset ref +LatexCommand eqref +reference "eq:likelihood" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +. + Substituting in +\begin_inset Formula $\log P(\bar{x}|y)$ +\end_inset + + from the invariant +\begin_inset CommandInset ref +LatexCommand eqref +reference "eq:gc_invariant" +plural "false" +caps "false" +noprefix "false" + +\end_inset + + we obtain +\begin_inset Formula +\[ +E_{jf}(y)=C-K_{gc}+E_{gc}(\bar{x},y). 
+\] + +\end_inset + +The +\emph on +likelihood +\emph default + function in +\emph on +GaussianConditional +\emph default + chooses +\begin_inset Formula $C=K_{gc}$ +\end_inset + +, and the +\emph on +JacobianFactor +\emph default + does not store any constant; it just implements: +\begin_inset Formula +\[ +E_{jf}(y)=E_{gc}(\bar{x},y)=\frac{1}{2}\|R\bar{x}+Sy-d\|_{\Sigma}^{2}=\frac{1}{2}\|Ay-b\|_{\Sigma}^{2} +\] + +\end_inset + +with +\begin_inset Formula $A=S$ +\end_inset + + and +\begin_inset Formula $b=d-R\bar{x}$ +\end_inset + +. +\end_layout + +\begin_layout Subsubsection* +GaussianMixtureFactor +\end_layout + +\begin_layout Standard +Analogously, a +\emph on +GaussianMixtureFactor +\emph default + typically results from a GaussianMixture by having known values +\begin_inset Formula $\bar{x}$ +\end_inset + + for the +\begin_inset Quotes eld +\end_inset + +measurement +\begin_inset Quotes erd +\end_inset + + +\begin_inset Formula $x$ +\end_inset + +: +\begin_inset Formula +\[ +L(y,m)\propto P(\bar{x}|y,m). +\] + +\end_inset + +We will similarly implement the negative log-likelihood +\begin_inset Formula $E_{mf}(y,m)$ +\end_inset + +: +\begin_inset Formula +\[ +E_{mf}(y,m)=-\log L(y,m)=C-\log P(\bar{x}|y,m). +\] + +\end_inset + +Since we know the log-density from the invariant +\begin_inset CommandInset ref +LatexCommand eqref +reference "eq:gm_invariant" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +, we obtain +\begin_inset Formula +\[ +\log P(\bar{x}|y,m)=K_{gm}-E_{gm}(\bar{x},y,m), +\] + +\end_inset + + and hence +\begin_inset Formula +\[ +E_{mf}(y,m)=C+E_{gm}(\bar{x},y,m)-K_{gm}. 
+\] + +\end_inset + +Substituting in +\begin_inset CommandInset ref +LatexCommand eqref +reference "eq:gm_error" +plural "false" +caps "false" +noprefix "false" + +\end_inset + + we finally have an expression where +\begin_inset Formula $K_{gm}$ +\end_inset + + canceled out, but we have a dependence on the individual component constants + +\begin_inset Formula $K_{gcm}$ +\end_inset + +: +\begin_inset Formula +\[ +E_{mf}(y,m)=C+E_{gcm}(\bar{x},y)-K_{gcm}. +\] + +\end_inset + +Unfortunately, we can no longer choose +\begin_inset Formula $C$ +\end_inset + + independently from +\begin_inset Formula $m$ +\end_inset + + to make the constant disappear. + There are two possibilities: +\end_layout + +\begin_layout Enumerate +Implement likelihood to yield both a hybrid factor +\emph on +and +\emph default + a discrete factor. +\end_layout + +\begin_layout Enumerate +Hide the constant inside the collection of JacobianFactor instances, which + is the possibility we implement. +\end_layout + +\begin_layout Standard +In either case, we implement the mixture factor +\begin_inset Formula $E_{mf}(y,m)$ +\end_inset + + as a set of +\emph on +JacobianFactor +\emph default + instances +\begin_inset Formula $E_{mf}(y,m)$ +\end_inset + +, indexed by the discrete assignment +\begin_inset Formula $m$ +\end_inset + +: +\begin_inset Formula +\[ +E_{mf}(y,m)=E_{jfm}(y)=\frac{1}{2}\|A_{m}y-b_{m}\|_{\Sigma_{mfm}}^{2}. +\] + +\end_inset + +In GTSAM, we define +\begin_inset Formula $A_{m}$ +\end_inset + + and +\begin_inset Formula $b_{m}$ +\end_inset + + strategically to make the +\emph on +JacobianFactor +\emph default + compute the constant, as well: +\begin_inset Formula +\[ +\frac{1}{2}\|A_{m}y-b_{m}\|_{\Sigma_{mfm}}^{2}=C+E_{gcm}(\bar{x},y)-K_{gcm}. 
+\] + +\end_inset + +Substituting in the definition +\begin_inset CommandInset ref +LatexCommand eqref +reference "eq:gc_error" +plural "false" +caps "false" +noprefix "false" + +\end_inset + + for +\begin_inset Formula $E_{gcm}(\bar{x},y)$ +\end_inset + + we need +\begin_inset Formula +\[ +\frac{1}{2}\|A_{m}y-b_{m}\|_{\Sigma_{mfm}}^{2}=C+\frac{1}{2}\|R_{m}\bar{x}+S_{m}y-d_{m}\|_{\Sigma_{m}}^{2}-K_{gcm} +\] + +\end_inset + +which can achieved by setting +\begin_inset Formula +\[ +A_{m}=\left[\begin{array}{c} +S_{m}\\ +0 +\end{array}\right],~b_{m}=\left[\begin{array}{c} +d_{m}-R_{m}\bar{x}\\ +c_{m} +\end{array}\right],~\Sigma_{mfm}=\left[\begin{array}{cc} +\Sigma_{m}\\ + & 1 +\end{array}\right] +\] + +\end_inset + +and setting the mode-dependent scalar +\begin_inset Formula $c_{m}$ +\end_inset + + such that +\begin_inset Formula $c_{m}^{2}=C-K_{gcm}$ +\end_inset + +. + This can be achieved by +\begin_inset Formula $C=\max K_{gcm}=K_{gm}$ +\end_inset + + and +\begin_inset Formula $c_{m}=\sqrt{2(C-K_{gcm})}$ +\end_inset + +. + Note that in case that all constants +\begin_inset Formula $K_{gcm}$ +\end_inset + + are equal, we can just use +\begin_inset Formula $C=K_{gm}$ +\end_inset + + and +\begin_inset Formula +\[ +A_{m}=S_{m},~b_{m}=d_{m}-R_{m}\bar{x},~\Sigma_{mfm}=\Sigma_{m} +\] + +\end_inset + +as before. +\end_layout + +\begin_layout Standard +In summary, we have +\begin_inset Formula +\begin{equation} +E_{mf}(y,m)=\frac{1}{2}\|A_{m}y-b_{m}\|_{\Sigma_{mfm}}^{2}=E_{gcm}(\bar{x},y)+K_{gm}-K_{gcm}.\label{eq:mf_invariant} +\end{equation} + +\end_inset + +which is identical to the GaussianMixture error +\begin_inset CommandInset ref +LatexCommand eqref +reference "eq:gm_error" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +. 
+\end_layout + +\end_body +\end_document diff --git a/doc/Hybrid.pdf b/doc/Hybrid.pdf new file mode 100644 index 0000000000..558be2902a Binary files /dev/null and b/doc/Hybrid.pdf differ diff --git a/examples/CameraResectioning.cpp b/examples/CameraResectioning.cpp index 7ac2de8b19..1dcca52448 100644 --- a/examples/CameraResectioning.cpp +++ b/examples/CameraResectioning.cpp @@ -30,8 +30,8 @@ using symbol_shorthand::X; * Unary factor on the unknown pose, resulting from meauring the projection of * a known 3D point in the image */ -class ResectioningFactor: public NoiseModelFactor1 { - typedef NoiseModelFactor1 Base; +class ResectioningFactor: public NoiseModelFactorN { + typedef NoiseModelFactorN Base; Cal3_S2::shared_ptr K_; ///< camera's intrinsic parameters Point3 P_; ///< 3D point on the calibration rig diff --git a/examples/CreateSFMExampleData.cpp b/examples/CreateSFMExampleData.cpp index 97f4c84dc8..cf7755bae6 100644 --- a/examples/CreateSFMExampleData.cpp +++ b/examples/CreateSFMExampleData.cpp @@ -18,9 +18,6 @@ #include #include -#include - -using namespace boost::assign; using namespace std; using namespace gtsam; diff --git a/examples/LocalizationExample.cpp b/examples/LocalizationExample.cpp index d9b205359e..7a39a4af5a 100644 --- a/examples/LocalizationExample.cpp +++ b/examples/LocalizationExample.cpp @@ -62,10 +62,10 @@ using namespace gtsam; // // The factor will be a unary factor, affect only a single system variable. It will // also use a standard Gaussian noise model. Hence, we will derive our new factor from -// the NoiseModelFactor1. +// the NoiseModelFactorN. 
#include -class UnaryFactor: public NoiseModelFactor1 { +class UnaryFactor: public NoiseModelFactorN { // The factor will hold a measurement consisting of an (X,Y) location // We could this with a Point2 but here we just use two doubles double mx_, my_; @@ -76,11 +76,11 @@ class UnaryFactor: public NoiseModelFactor1 { // The constructor requires the variable key, the (X, Y) measurement value, and the noise model UnaryFactor(Key j, double x, double y, const SharedNoiseModel& model): - NoiseModelFactor1(model, j), mx_(x), my_(y) {} + NoiseModelFactorN(model, j), mx_(x), my_(y) {} ~UnaryFactor() override {} - // Using the NoiseModelFactor1 base class there are two functions that must be overridden. + // Using the NoiseModelFactorN base class there are two functions that must be overridden. // The first is the 'evaluateError' function. This function implements the desired measurement // function, returning a vector of errors when evaluated at the provided variable value. It // must also calculate the Jacobians for this measurement function, if requested. 
diff --git a/examples/Pose3Localization.cpp b/examples/Pose3Localization.cpp index c18a9e9cee..44076ab38b 100644 --- a/examples/Pose3Localization.cpp +++ b/examples/Pose3Localization.cpp @@ -43,9 +43,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } @@ -74,10 +74,8 @@ int main(const int argc, const char* argv[]) { // Calculate and print marginal covariances for all variables Marginals marginals(*graph, result); - for (const auto key_value : result) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - std::cout << marginals.marginalCovariance(key_value.key) << endl; + for (const auto& key_pose : result.extract()) { + std::cout << marginals.marginalCovariance(key_pose.first) << endl; } return 0; } diff --git a/examples/Pose3SLAMExample_changeKeys.cpp b/examples/Pose3SLAMExample_changeKeys.cpp index 5d4ed6657d..7da83d211b 100644 --- a/examples/Pose3SLAMExample_changeKeys.cpp +++ b/examples/Pose3SLAMExample_changeKeys.cpp @@ -55,14 +55,14 @@ int main(const int argc, const char *argv[]) { std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl; // Additional: rewrite input with simplified keys 0,1,... 
Values simpleInitial; - for(const auto key_value: *initial) { + for (const auto k : initial->keys()) { Key key; - if(add) - key = key_value.key + firstKey; + if (add) + key = k + firstKey; else - key = key_value.key - firstKey; + key = k - firstKey; - simpleInitial.insert(key, initial->at(key_value.key)); + simpleInitial.insert(key, initial->at(k)); } NonlinearFactorGraph simpleGraph; for(const boost::shared_ptr& factor: *graph) { @@ -71,11 +71,11 @@ int main(const int argc, const char *argv[]) { if (pose3Between){ Key key1, key2; if(add){ - key1 = pose3Between->key1() + firstKey; - key2 = pose3Between->key2() + firstKey; + key1 = pose3Between->key<1>() + firstKey; + key2 = pose3Between->key<2>() + firstKey; }else{ - key1 = pose3Between->key1() - firstKey; - key2 = pose3Between->key2() - firstKey; + key1 = pose3Between->key<1>() - firstKey; + key2 = pose3Between->key<2>() - firstKey; } NonlinearFactor::shared_ptr simpleFactor( new BetweenFactor(key1, key2, pose3Between->measured(), pose3Between->noiseModel())); diff --git a/examples/Pose3SLAMExample_g2o.cpp b/examples/Pose3SLAMExample_g2o.cpp index 3679643072..7ae2978ce5 100644 --- a/examples/Pose3SLAMExample_g2o.cpp +++ b/examples/Pose3SLAMExample_g2o.cpp @@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } diff --git a/examples/Pose3SLAMExample_initializePose3Chordal.cpp b/examples/Pose3SLAMExample_initializePose3Chordal.cpp index 992750fed8..03db9fc77a 100644 --- a/examples/Pose3SLAMExample_initializePose3Chordal.cpp +++ b/examples/Pose3SLAMExample_initializePose3Chordal.cpp @@ -42,9 +42,9 @@ int main(const int argc, const char* 
argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } diff --git a/examples/Pose3SLAMExample_initializePose3Gradient.cpp b/examples/Pose3SLAMExample_initializePose3Gradient.cpp index 384f290a19..31693c5ff5 100644 --- a/examples/Pose3SLAMExample_initializePose3Gradient.cpp +++ b/examples/Pose3SLAMExample_initializePose3Gradient.cpp @@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } diff --git a/examples/SolverComparer.cpp b/examples/SolverComparer.cpp index 7bae41403a..3fffc31d07 100644 --- a/examples/SolverComparer.cpp +++ b/examples/SolverComparer.cpp @@ -69,8 +69,8 @@ namespace br = boost::range; typedef Pose2 Pose; -typedef NoiseModelFactor1 NM1; -typedef NoiseModelFactor2 NM2; +typedef NoiseModelFactorN NM1; +typedef NoiseModelFactorN NM2; typedef BearingRangeFactor BR; double chi2_red(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& config) { @@ -261,7 +261,7 @@ void runIncremental() if(BetweenFactor::shared_ptr factor = boost::dynamic_pointer_cast >(datasetMeasurements[nextMeasurement])) { - Key key1 = factor->key1(), key2 = factor->key2(); + Key key1 = factor->key<1>(), key2 = factor->key<2>(); if(((int)key1 >= firstStep && key1 < key2) || ((int)key2 >= firstStep && key2 < key1)) { // We found an odometry starting at firstStep firstPose = 
std::min(key1, key2); @@ -313,11 +313,11 @@ void runIncremental() boost::dynamic_pointer_cast >(measurementf)) { // Stop collecting measurements that are for future steps - if(factor->key1() > step || factor->key2() > step) + if(factor->key<1>() > step || factor->key<2>() > step) break; // Require that one of the nodes is the current one - if(factor->key1() != step && factor->key2() != step) + if(factor->key<1>() != step && factor->key<2>() != step) throw runtime_error("Problem in data file, out-of-sequence measurements"); // Add a new factor @@ -325,22 +325,22 @@ void runIncremental() const auto& measured = factor->measured(); // Initialize the new variable - if(factor->key1() > factor->key2()) { - if(!newVariables.exists(factor->key1())) { // Only need to check newVariables since loop closures come after odometry + if(factor->key<1>() > factor->key<2>()) { + if(!newVariables.exists(factor->key<1>())) { // Only need to check newVariables since loop closures come after odometry if(step == 1) - newVariables.insert(factor->key1(), measured.inverse()); + newVariables.insert(factor->key<1>(), measured.inverse()); else { - Pose prevPose = isam2.calculateEstimate(factor->key2()); - newVariables.insert(factor->key1(), prevPose * measured.inverse()); + Pose prevPose = isam2.calculateEstimate(factor->key<2>()); + newVariables.insert(factor->key<1>(), prevPose * measured.inverse()); } } } else { - if(!newVariables.exists(factor->key2())) { // Only need to check newVariables since loop closures come after odometry + if(!newVariables.exists(factor->key<2>())) { // Only need to check newVariables since loop closures come after odometry if(step == 1) - newVariables.insert(factor->key2(), measured); + newVariables.insert(factor->key<2>(), measured); else { - Pose prevPose = isam2.calculateEstimate(factor->key1()); - newVariables.insert(factor->key2(), prevPose * measured); + Pose prevPose = isam2.calculateEstimate(factor->key<1>()); + newVariables.insert(factor->key<2>(), 
prevPose * measured); } } } @@ -559,12 +559,12 @@ void runPerturb() // Perturb values VectorValues noise; - for(const Values::KeyValuePair key_val: initial) + for(const auto& key_dim: initial.dims()) { - Vector noisev(key_val.value.dim()); + Vector noisev(key_dim.second); for(Vector::Index i = 0; i < noisev.size(); ++i) noisev(i) = normal(rng); - noise.insert(key_val.key, noisev); + noise.insert(key_dim.first, noisev); } Values perturbed = initial.retract(noise); diff --git a/examples/StereoVOExample_large.cpp b/examples/StereoVOExample_large.cpp index af81db7030..c4a3a8de42 100644 --- a/examples/StereoVOExample_large.cpp +++ b/examples/StereoVOExample_large.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -113,7 +114,7 @@ int main(int argc, char** argv) { Values result = optimizer.optimize(); cout << "Final result sample:" << endl; - Values pose_values = result.filter(); + Values pose_values = utilities::allPose3s(result); pose_values.print("Final camera poses:\n"); return 0; diff --git a/examples/TimeTBB.cpp b/examples/TimeTBB.cpp index 4bc5144f44..68511e76fb 100644 --- a/examples/TimeTBB.cpp +++ b/examples/TimeTBB.cpp @@ -18,13 +18,11 @@ #include #include -#include #include #include using namespace std; using namespace gtsam; -using boost::assign::list_of; #ifdef GTSAM_USE_TBB @@ -81,7 +79,7 @@ map testWithoutMemoryAllocation(int num_threads) // Now call it vector results(numberOfProblems); - const vector grainSizes = list_of(1)(10)(100)(1000); + const vector grainSizes = {1, 10, 100, 1000}; map timingResults; for(size_t grainSize: grainSizes) { @@ -145,7 +143,7 @@ map testWithMemoryAllocation(int num_threads) // Now call it vector results(numberOfProblems); - const vector grainSizes = list_of(1)(10)(100)(1000); + const vector grainSizes = {1, 10, 100, 1000}; map timingResults; for(size_t grainSize: grainSizes) { @@ -172,7 +170,7 @@ int main(int argc, char* argv[]) cout << "numberOfProblems = " << numberOfProblems << endl; 
cout << "problemSize = " << problemSize << endl; - const vector numThreads = list_of(1)(4)(8); + const vector numThreads = {1, 4, 8}; Results results; for(size_t n: numThreads) diff --git a/gtsam/base/FastList.h b/gtsam/base/FastList.h index 47f8133118..29ecd7dbc9 100644 --- a/gtsam/base/FastList.h +++ b/gtsam/base/FastList.h @@ -56,6 +56,9 @@ class FastList: public std::list l) : Base(l) {} + #ifdef GTSAM_ALLOCATOR_BOOSTPOOL /** Copy constructor from a standard STL container */ FastList(const std::list& x) { diff --git a/gtsam/base/FastSet.h b/gtsam/base/FastSet.h index 906412f4d2..1fceebad5b 100644 --- a/gtsam/base/FastSet.h +++ b/gtsam/base/FastSet.h @@ -56,15 +56,9 @@ class FastSet: public std::set, typedef std::set, typename internal::FastDefaultAllocator::type> Base; - /** Default constructor */ - FastSet() { - } + using Base::Base; // Inherit the set constructors - /** Constructor from a range, passes through to base class */ - template - explicit FastSet(INPUTITERATOR first, INPUTITERATOR last) : - Base(first, last) { - } + FastSet() = default; ///< Default constructor /** Constructor from a iterable container, passes through to base class */ template diff --git a/gtsam/base/SymmetricBlockMatrix.cpp b/gtsam/base/SymmetricBlockMatrix.cpp index 7f41da137b..d37bbf7d14 100644 --- a/gtsam/base/SymmetricBlockMatrix.cpp +++ b/gtsam/base/SymmetricBlockMatrix.cpp @@ -24,6 +24,12 @@ namespace gtsam { +/* ************************************************************************* */ +SymmetricBlockMatrix::SymmetricBlockMatrix() : blockStart_(0) { + variableColOffsets_.push_back(0); + assertInvariants(); +} + /* ************************************************************************* */ SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf( const SymmetricBlockMatrix& other) { @@ -61,6 +67,18 @@ Matrix SymmetricBlockMatrix::block(DenseIndex I, DenseIndex J) const { } } +/* ************************************************************************* */ +void 
SymmetricBlockMatrix::negate() { + full().triangularView() *= -1.0; +} + +/* ************************************************************************* */ +void SymmetricBlockMatrix::invertInPlace() { + const auto identity = Matrix::Identity(rows(), rows()); + full().triangularView() = + selfadjointView().llt().solve(identity).triangularView(); +} + /* ************************************************************************* */ void SymmetricBlockMatrix::choleskyPartial(DenseIndex nFrontals) { gttic(VerticalBlockMatrix_choleskyPartial); diff --git a/gtsam/base/SymmetricBlockMatrix.h b/gtsam/base/SymmetricBlockMatrix.h index 302a1ec347..e8afe6e386 100644 --- a/gtsam/base/SymmetricBlockMatrix.h +++ b/gtsam/base/SymmetricBlockMatrix.h @@ -63,12 +63,7 @@ namespace gtsam { public: /// Construct from an empty matrix (asserts that the matrix is empty) - SymmetricBlockMatrix() : - blockStart_(0) - { - variableColOffsets_.push_back(0); - assertInvariants(); - } + SymmetricBlockMatrix(); /// Construct from a container of the sizes of each block. template @@ -265,19 +260,10 @@ namespace gtsam { } /// Negate the entire active matrix. - void negate() { - full().triangularView() *= -1.0; - } + void negate(); /// Invert the entire active matrix in place. 
- void invertInPlace() { - const auto identity = Matrix::Identity(rows(), rows()); - full().triangularView() = - selfadjointView() - .llt() - .solve(identity) - .triangularView(); - } + void invertInPlace(); /// @} diff --git a/gtsam/base/Testable.h b/gtsam/base/Testable.h index f7eddd4140..b68d6a97cb 100644 --- a/gtsam/base/Testable.h +++ b/gtsam/base/Testable.h @@ -23,7 +23,7 @@ * void print(const std::string& name) const = 0; * * equality up to tolerance - * tricky to implement, see NoiseModelFactor1 for an example + * tricky to implement, see PriorFactor for an example * equals is not supposed to print out *anything*, just return true|false * bool equals(const Derived& expected, double tol) const = 0; * diff --git a/gtsam/base/Vector.cpp b/gtsam/base/Vector.cpp index 658ab9a0d4..16d913a967 100644 --- a/gtsam/base/Vector.cpp +++ b/gtsam/base/Vector.cpp @@ -299,17 +299,14 @@ weightedPseudoinverse(const Vector& a, const Vector& weights) { } /* ************************************************************************* */ -Vector concatVectors(const std::list& vs) -{ +Vector concatVectors(const std::list& vs) { size_t dim = 0; - for(Vector v: vs) - dim += v.size(); + for (const Vector& v : vs) dim += v.size(); Vector A(dim); size_t index = 0; - for(Vector v: vs) { - for(int d = 0; d < v.size(); d++) - A(d+index) = v(d); + for (const Vector& v : vs) { + for (int d = 0; d < v.size(); d++) A(d + index) = v(d); index += v.size(); } diff --git a/gtsam/base/tests/testDSFMap.cpp b/gtsam/base/tests/testDSFMap.cpp index 816498ce85..3fd556832e 100644 --- a/gtsam/base/tests/testDSFMap.cpp +++ b/gtsam/base/tests/testDSFMap.cpp @@ -16,17 +16,14 @@ * @brief unit tests for DSFMap */ -#include - -#include -#include -using namespace boost::assign; - #include +#include #include +#include +#include +#include -using namespace std; using namespace gtsam; /* ************************************************************************* */ @@ -65,9 +62,8 @@ TEST(DSFMap, merge3) { TEST(DSFMap, 
mergePairwiseMatches) { // Create some "matches" - typedef pair Match; - list matches; - matches += Match(1,2), Match(2,3), Match(4,5), Match(4,6); + typedef std::pair Match; + const std::list matches{{1, 2}, {2, 3}, {4, 5}, {4, 6}}; // Merge matches DSFMap dsf; @@ -86,18 +82,17 @@ TEST(DSFMap, mergePairwiseMatches) { TEST(DSFMap, mergePairwiseMatches2) { // Create some measurements with image index and feature index - typedef pair Measurement; + typedef std::pair Measurement; Measurement m11(1,1),m12(1,2),m14(1,4); // in image 1 Measurement m22(2,2),m23(2,3),m25(2,5),m26(2,6); // in image 2 // Add them all - list measurements; - measurements += m11,m12,m14, m22,m23,m25,m26; + const std::list measurements{m11, m12, m14, m22, m23, m25, m26}; // Create some "matches" - typedef pair Match; - list matches; - matches += Match(m11,m22), Match(m12,m23), Match(m14,m25), Match(m14,m26); + typedef std::pair Match; + const std::list matches{ + {m11, m22}, {m12, m23}, {m14, m25}, {m14, m26}}; // Merge matches DSFMap dsf; @@ -114,26 +109,16 @@ TEST(DSFMap, mergePairwiseMatches2) { /* ************************************************************************* */ TEST(DSFMap, sets){ // Create some "matches" - typedef pair Match; - list matches; - matches += Match(1,2), Match(2,3), Match(4,5), Match(4,6); + using Match = std::pair; + const std::list matches{{1, 2}, {2, 3}, {4, 5}, {4, 6}}; // Merge matches DSFMap dsf; for(const Match& m: matches) dsf.merge(m.first,m.second); - map > sets = dsf.sets(); - set s1, s2; - s1 += 1,2,3; - s2 += 4,5,6; - - /*for(key_pair st: sets){ - cout << "Set " << st.first << " :{"; - for(const size_t s: st.second) - cout << s << ", "; - cout << "}" << endl; - }*/ + std::map > sets = dsf.sets(); + const std::set s1{1, 2, 3}, s2{4, 5, 6}; EXPECT(s1 == sets[1]); EXPECT(s2 == sets[4]); diff --git a/gtsam/base/tests/testDSFVector.cpp b/gtsam/base/tests/testDSFVector.cpp index 88cea8c010..b50ddde339 100644 --- a/gtsam/base/tests/testDSFVector.cpp +++ 
b/gtsam/base/tests/testDSFVector.cpp @@ -21,14 +21,15 @@ #include #include -#include -#include -#include -using namespace boost::assign; #include +#include +#include +#include -using namespace std; +using std::pair; +using std::map; +using std::vector; using namespace gtsam; /* ************************************************************************* */ @@ -64,8 +65,8 @@ TEST(DSFBase, mergePairwiseMatches) { // Create some "matches" typedef pair Match; - vector matches; - matches += Match(1,2), Match(2,3), Match(4,5), Match(4,6); + const vector matches{Match(1, 2), Match(2, 3), Match(4, 5), + Match(4, 6)}; // Merge matches DSFBase dsf(7); // We allow for keys 0..6 @@ -85,7 +86,7 @@ TEST(DSFBase, mergePairwiseMatches) { /* ************************************************************************* */ TEST(DSFVector, merge2) { boost::shared_ptr v = boost::make_shared(5); - std::vector keys; keys += 1, 3; + const std::vector keys {1, 3}; DSFVector dsf(v, keys); dsf.merge(1,3); EXPECT(dsf.find(1) == dsf.find(3)); @@ -95,10 +96,10 @@ TEST(DSFVector, merge2) { TEST(DSFVector, sets) { DSFVector dsf(2); dsf.merge(0,1); - map > sets = dsf.sets(); + map > sets = dsf.sets(); LONGS_EQUAL(1, sets.size()); - set expected; expected += 0, 1; + const std::set expected{0, 1}; EXPECT(expected == sets[dsf.find(0)]); } @@ -109,7 +110,7 @@ TEST(DSFVector, arrays) { map > arrays = dsf.arrays(); LONGS_EQUAL(1, arrays.size()); - vector expected; expected += 0, 1; + const vector expected{0, 1}; EXPECT(expected == arrays[dsf.find(0)]); } @@ -118,10 +119,10 @@ TEST(DSFVector, sets2) { DSFVector dsf(3); dsf.merge(0,1); dsf.merge(1,2); - map > sets = dsf.sets(); + map > sets = dsf.sets(); LONGS_EQUAL(1, sets.size()); - set expected; expected += 0, 1, 2; + const std::set expected{0, 1, 2}; EXPECT(expected == sets[dsf.find(0)]); } @@ -133,7 +134,7 @@ TEST(DSFVector, arrays2) { map > arrays = dsf.arrays(); LONGS_EQUAL(1, arrays.size()); - vector expected; expected += 0, 1, 2; + const vector 
expected{0, 1, 2}; EXPECT(expected == arrays[dsf.find(0)]); } @@ -141,10 +142,10 @@ TEST(DSFVector, arrays2) { TEST(DSFVector, sets3) { DSFVector dsf(3); dsf.merge(0,1); - map > sets = dsf.sets(); + map > sets = dsf.sets(); LONGS_EQUAL(2, sets.size()); - set expected; expected += 0, 1; + const std::set expected{0, 1}; EXPECT(expected == sets[dsf.find(0)]); } @@ -155,7 +156,7 @@ TEST(DSFVector, arrays3) { map > arrays = dsf.arrays(); LONGS_EQUAL(2, arrays.size()); - vector expected; expected += 0, 1; + const vector expected{0, 1}; EXPECT(expected == arrays[dsf.find(0)]); } @@ -163,10 +164,10 @@ TEST(DSFVector, arrays3) { TEST(DSFVector, set) { DSFVector dsf(3); dsf.merge(0,1); - set set = dsf.set(0); + std::set set = dsf.set(0); LONGS_EQUAL(2, set.size()); - std::set expected; expected += 0, 1; + const std::set expected{0, 1}; EXPECT(expected == set); } @@ -175,10 +176,10 @@ TEST(DSFVector, set2) { DSFVector dsf(3); dsf.merge(0,1); dsf.merge(1,2); - set set = dsf.set(0); + std::set set = dsf.set(0); LONGS_EQUAL(3, set.size()); - std::set expected; expected += 0, 1, 2; + const std::set expected{0, 1, 2}; EXPECT(expected == set); } @@ -195,13 +196,12 @@ TEST(DSFVector, isSingleton) { TEST(DSFVector, mergePairwiseMatches) { // Create some measurements - vector keys; - keys += 1,2,3,4,5,6; + const vector keys{1, 2, 3, 4, 5, 6}; // Create some "matches" typedef pair Match; - vector matches; - matches += Match(1,2), Match(2,3), Match(4,5), Match(4,6); + const vector matches{Match(1, 2), Match(2, 3), Match(4, 5), + Match(4, 6)}; // Merge matches DSFVector dsf(keys); @@ -209,13 +209,13 @@ TEST(DSFVector, mergePairwiseMatches) { dsf.merge(m.first,m.second); // Check that we have two connected components, 1,2,3 and 4,5,6 - map > sets = dsf.sets(); + map > sets = dsf.sets(); LONGS_EQUAL(2, sets.size()); - set expected1; expected1 += 1,2,3; - set actual1 = sets[dsf.find(2)]; + const std::set expected1{1, 2, 3}; + std::set actual1 = sets[dsf.find(2)]; EXPECT(expected1 == 
actual1); - set expected2; expected2 += 4,5,6; - set actual2 = sets[dsf.find(5)]; + const std::set expected2{4, 5, 6}; + std::set actual2 = sets[dsf.find(5)]; EXPECT(expected2 == actual2); } diff --git a/gtsam/base/tests/testFastContainers.cpp b/gtsam/base/tests/testFastContainers.cpp index 12dee036eb..acb6347db2 100644 --- a/gtsam/base/tests/testFastContainers.cpp +++ b/gtsam/base/tests/testFastContainers.cpp @@ -11,12 +11,8 @@ #include #include -#include -#include - #include -using namespace boost::assign; using namespace gtsam; /* ************************************************************************* */ @@ -25,7 +21,7 @@ TEST( testFastContainers, KeySet ) { KeyVector init_vector {2, 3, 4, 5}; KeySet actSet(init_vector); - KeySet expSet; expSet += 2, 3, 4, 5; + KeySet expSet{2, 3, 4, 5}; EXPECT(actSet == expSet); } diff --git a/gtsam/base/tests/testSymmetricBlockMatrix.cpp b/gtsam/base/tests/testSymmetricBlockMatrix.cpp index c24e12c251..24dbe0a967 100644 --- a/gtsam/base/tests/testSymmetricBlockMatrix.cpp +++ b/gtsam/base/tests/testSymmetricBlockMatrix.cpp @@ -17,14 +17,12 @@ #include #include -#include using namespace std; using namespace gtsam; -using boost::assign::list_of; static SymmetricBlockMatrix testBlockMatrix( - list_of(3)(2)(1), + std::vector{3, 2, 1}, (Matrix(6, 6) << 1, 2, 3, 4, 5, 6, 2, 8, 9, 10, 11, 12, @@ -101,7 +99,8 @@ TEST(SymmetricBlockMatrix, Ranges) /* ************************************************************************* */ TEST(SymmetricBlockMatrix, expressions) { - SymmetricBlockMatrix expected1(list_of(2)(3)(1), (Matrix(6, 6) << + const std::vector dimensions{2, 3, 1}; + SymmetricBlockMatrix expected1(dimensions, (Matrix(6, 6) << 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 6, 8, 0, @@ -109,7 +108,7 @@ TEST(SymmetricBlockMatrix, expressions) 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0).finished()); - SymmetricBlockMatrix expected2(list_of(2)(3)(1), (Matrix(6, 6) << + SymmetricBlockMatrix expected2(dimensions, (Matrix(6, 6) << 0, 0, 10, 
15, 20, 0, 0, 0, 12, 18, 24, 0, 0, 0, 0, 0, 0, 0, @@ -120,32 +119,32 @@ TEST(SymmetricBlockMatrix, expressions) Matrix a = (Matrix(1, 3) << 2, 3, 4).finished(); Matrix b = (Matrix(1, 2) << 5, 6).finished(); - SymmetricBlockMatrix bm1(list_of(2)(3)(1)); + SymmetricBlockMatrix bm1(dimensions); bm1.setZero(); bm1.diagonalBlock(1).rankUpdate(a.transpose()); EXPECT(assert_equal(Matrix(expected1.selfadjointView()), bm1.selfadjointView())); - SymmetricBlockMatrix bm2(list_of(2)(3)(1)); + SymmetricBlockMatrix bm2(dimensions); bm2.setZero(); bm2.updateOffDiagonalBlock(0, 1, b.transpose() * a); EXPECT(assert_equal(Matrix(expected2.selfadjointView()), bm2.selfadjointView())); - SymmetricBlockMatrix bm3(list_of(2)(3)(1)); + SymmetricBlockMatrix bm3(dimensions); bm3.setZero(); bm3.updateOffDiagonalBlock(1, 0, a.transpose() * b); EXPECT(assert_equal(Matrix(expected2.selfadjointView()), bm3.selfadjointView())); - SymmetricBlockMatrix bm4(list_of(2)(3)(1)); + SymmetricBlockMatrix bm4(dimensions); bm4.setZero(); bm4.updateDiagonalBlock(1, expected1.diagonalBlock(1)); EXPECT(assert_equal(Matrix(expected1.selfadjointView()), bm4.selfadjointView())); - SymmetricBlockMatrix bm5(list_of(2)(3)(1)); + SymmetricBlockMatrix bm5(dimensions); bm5.setZero(); bm5.updateOffDiagonalBlock(0, 1, expected2.aboveDiagonalBlock(0, 1)); EXPECT(assert_equal(Matrix(expected2.selfadjointView()), bm5.selfadjointView())); - SymmetricBlockMatrix bm6(list_of(2)(3)(1)); + SymmetricBlockMatrix bm6(dimensions); bm6.setZero(); bm6.updateOffDiagonalBlock(1, 0, expected2.aboveDiagonalBlock(0, 1).transpose()); EXPECT(assert_equal(Matrix(expected2.selfadjointView()), bm6.selfadjointView())); @@ -162,7 +161,8 @@ TEST(SymmetricBlockMatrix, inverseInPlace) { inputMatrix += c * c.transpose(); const Matrix expectedInverse = inputMatrix.inverse(); - SymmetricBlockMatrix symmMatrix(list_of(2)(1), inputMatrix); + const std::vector dimensions{2, 1}; + SymmetricBlockMatrix symmMatrix(dimensions, inputMatrix); // invert in place 
symmMatrix.invertInPlace(); EXPECT(assert_equal(expectedInverse, symmMatrix.selfadjointView())); diff --git a/gtsam/base/tests/testTreeTraversal.cpp b/gtsam/base/tests/testTreeTraversal.cpp index 88e476cb9e..147f4f0307 100644 --- a/gtsam/base/tests/testTreeTraversal.cpp +++ b/gtsam/base/tests/testTreeTraversal.cpp @@ -23,16 +23,13 @@ #include #include #include -#include -using boost::assign::operator+=; -using namespace std; using namespace gtsam; struct TestNode { typedef boost::shared_ptr shared_ptr; int data; - vector children; + std::vector children; TestNode() : data(-1) {} TestNode(int data) : data(data) {} }; @@ -110,10 +107,8 @@ TEST(treeTraversal, DepthFirst) TestForest testForest = makeTestForest(); // Expected visit order - std::list preOrderExpected; - preOrderExpected += 0, 2, 3, 4, 1; - std::list postOrderExpected; - postOrderExpected += 2, 4, 3, 0, 1; + const std::list preOrderExpected{0, 2, 3, 4, 1}; + const std::list postOrderExpected{2, 4, 3, 0, 1}; // Actual visit order PreOrderVisitor preVisitor; @@ -135,8 +130,7 @@ TEST(treeTraversal, CloneForest) testForest2.roots_ = treeTraversal::CloneForest(testForest1); // Check that the original and clone both are expected - std::list preOrder1Expected; - preOrder1Expected += 0, 2, 3, 4, 1; + const std::list preOrder1Expected{0, 2, 3, 4, 1}; std::list preOrder1Actual = getPreorder(testForest1); std::list preOrder2Actual = getPreorder(testForest2); EXPECT(assert_container_equality(preOrder1Expected, preOrder1Actual)); @@ -144,8 +138,7 @@ TEST(treeTraversal, CloneForest) // Modify clone - should not modify original testForest2.roots_[0]->children[1]->data = 10; - std::list preOrderModifiedExpected; - preOrderModifiedExpected += 0, 2, 10, 4, 1; + const std::list preOrderModifiedExpected{0, 2, 10, 4, 1}; // Check that original is the same and only the clone is modified std::list preOrder1ModActual = getPreorder(testForest1); diff --git a/gtsam/base/tests/testVerticalBlockMatrix.cpp 
b/gtsam/base/tests/testVerticalBlockMatrix.cpp index 8bb4474a45..758d776c85 100644 --- a/gtsam/base/tests/testVerticalBlockMatrix.cpp +++ b/gtsam/base/tests/testVerticalBlockMatrix.cpp @@ -18,14 +18,13 @@ #include #include -#include -using namespace std; +#include +#include + using namespace gtsam; -using boost::assign::list_of; -list L = list_of(3)(2)(1); -vector dimensions(L.begin(),L.end()); +const std::vector dimensions{3, 2, 1}; //***************************************************************************** TEST(VerticalBlockMatrix, Constructor1) { diff --git a/gtsam/base/types.h b/gtsam/base/types.h index cb8412cdf4..b9f4b0a3e1 100644 --- a/gtsam/base/types.h +++ b/gtsam/base/types.h @@ -46,18 +46,49 @@ #include #endif +/* Define macros for ignoring compiler warnings. + * Usage Example: + * ``` + * CLANG_DIAGNOSTIC_PUSH_IGNORE("-Wdeprecated-declarations") + * GCC_DIAGNOSTIC_PUSH_IGNORE("-Wdeprecated-declarations") + * MSVC_DIAGNOSTIC_PUSH_IGNORE(4996) + * // ... code you want to suppress deprecation warnings for ... 
+ * DIAGNOSTIC_POP() + * ``` + */ +#define DO_PRAGMA(x) _Pragma (#x) #ifdef __clang__ # define CLANG_DIAGNOSTIC_PUSH_IGNORE(diag) \ _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"" diag "\"") + DO_PRAGMA(clang diagnostic ignored diag) #else # define CLANG_DIAGNOSTIC_PUSH_IGNORE(diag) #endif -#ifdef __clang__ -# define CLANG_DIAGNOSTIC_POP() _Pragma("clang diagnostic pop") +#ifdef __GNUC__ +# define GCC_DIAGNOSTIC_PUSH_IGNORE(diag) \ + _Pragma("GCC diagnostic push") \ + DO_PRAGMA(GCC diagnostic ignored diag) +#else +# define GCC_DIAGNOSTIC_PUSH_IGNORE(diag) +#endif + +#ifdef _MSC_VER +# define MSVC_DIAGNOSTIC_PUSH_IGNORE(code) \ + _Pragma("warning ( push )") \ + DO_PRAGMA(warning ( disable : code )) +#else +# define MSVC_DIAGNOSTIC_PUSH_IGNORE(code) +#endif + +#if defined(__clang__) +# define DIAGNOSTIC_POP() _Pragma("clang diagnostic pop") +#elif defined(__GNUC__) +# define DIAGNOSTIC_POP() _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define DIAGNOSTIC_POP() _Pragma("warning ( pop )") #else -# define CLANG_DIAGNOSTIC_POP() +# define DIAGNOSTIC_POP() #endif namespace gtsam { diff --git a/gtsam/base/utilities.h b/gtsam/base/utilities.h index d9b92b8aa3..0a05a704c0 100644 --- a/gtsam/base/utilities.h +++ b/gtsam/base/utilities.h @@ -27,3 +27,42 @@ struct RedirectCout { }; } + +// boost::index_sequence was introduced in 1.66, so we'll manually define an +// implementation if user has 1.65. boost::index_sequence is used to get array +// indices that align with a parameter pack. 
+#include +#if BOOST_VERSION >= 106600 +#include +#else +namespace boost { +namespace mp11 { +// Adapted from https://stackoverflow.com/a/32223343/9151520 +template +struct index_sequence { + using type = index_sequence; + using value_type = size_t; + static constexpr std::size_t size() noexcept { return sizeof...(Ints); } +}; +namespace detail { +template +struct _merge_and_renumber; + +template +struct _merge_and_renumber, index_sequence > + : index_sequence {}; +} // namespace detail +template +struct make_index_sequence + : detail::_merge_and_renumber< + typename make_index_sequence::type, + typename make_index_sequence::type> {}; +template <> +struct make_index_sequence<0> : index_sequence<> {}; +template <> +struct make_index_sequence<1> : index_sequence<0> {}; +template +using index_sequence_for = make_index_sequence; +} // namespace mp11 +} // namespace boost +#endif diff --git a/gtsam/discrete/AlgebraicDecisionTree.h b/gtsam/discrete/AlgebraicDecisionTree.h index 25e18cfaf2..b3f0d69b0e 100644 --- a/gtsam/discrete/AlgebraicDecisionTree.h +++ b/gtsam/discrete/AlgebraicDecisionTree.h @@ -71,7 +71,7 @@ namespace gtsam { static inline double id(const double& x) { return x; } }; - AlgebraicDecisionTree() : Base(1.0) {} + AlgebraicDecisionTree(double leaf = 1.0) : Base(leaf) {} // Explicitly non-explicit constructor AlgebraicDecisionTree(const Base& add) : Base(add) {} @@ -158,9 +158,9 @@ namespace gtsam { } /// print method customized to value type `double`. 
- void print(const std::string& s, - const typename Base::LabelFormatter& labelFormatter = - &DefaultFormatter) const { + void print(const std::string& s = "", + const typename Base::LabelFormatter& labelFormatter = + &DefaultFormatter) const { auto valueFormatter = [](const double& v) { return (boost::format("%4.8g") % v).str(); }; diff --git a/gtsam/discrete/Assignment.h b/gtsam/discrete/Assignment.h index 0ea84e4501..56a0ed3276 100644 --- a/gtsam/discrete/Assignment.h +++ b/gtsam/discrete/Assignment.h @@ -51,6 +51,13 @@ class Assignment : public std::map { public: using std::map::operator=; + // Define the implicit default constructor. + Assignment() = default; + + // Construct from initializer list. + Assignment(std::initializer_list> init) + : std::map{init} {} + void print(const std::string& s = "Assignment: ", const std::function& labelFormatter = &DefaultFormatter) const { diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index f727432065..9f3d5e8f95 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -22,14 +22,10 @@ #include #include -#include #include #include -#include #include -#include -#include -#include + #include #include #include @@ -41,8 +37,6 @@ namespace gtsam { - using boost::assign::operator+=; - /****************************************************************************/ // Node /****************************************************************************/ @@ -64,6 +58,9 @@ namespace gtsam { */ size_t nrAssignments_; + /// Default constructor for serialization. 
+ Leaf() {} + /// Constructor from constant Leaf(const Y& constant, size_t nrAssignments = 1) : constant_(constant), nrAssignments_(nrAssignments) {} @@ -154,6 +151,18 @@ namespace gtsam { } bool isLeaf() const override { return true; } + + private: + using Base = DecisionTree::Node; + + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); + ar& BOOST_SERIALIZATION_NVP(constant_); + ar& BOOST_SERIALIZATION_NVP(nrAssignments_); + } }; // Leaf /****************************************************************************/ @@ -177,6 +186,9 @@ namespace gtsam { using ChoicePtr = boost::shared_ptr; public: + /// Default constructor for serialization. + Choice() {} + ~Choice() override { #ifdef DT_DEBUG_MEMORY std::std::cout << Node::nrNodes << " destructing (Choice) " << this->id() @@ -428,6 +440,19 @@ namespace gtsam { r->push_back(branch->choose(label, index)); return Unique(r); } + + private: + using Base = DecisionTree::Node; + + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); + ar& BOOST_SERIALIZATION_NVP(label_); + ar& BOOST_SERIALIZATION_NVP(branches_); + ar& BOOST_SERIALIZATION_NVP(allSame_); + } }; // Choice /****************************************************************************/ @@ -504,8 +529,7 @@ namespace gtsam { template DecisionTree::DecisionTree(const L& label, const DecisionTree& f0, const DecisionTree& f1) { - std::vector functions; - functions += f0, f1; + const std::vector functions{f0, f1}; root_ = compose(functions.begin(), functions.end(), label); } diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index 957a4eb480..a8764a98f7 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -19,9 +19,11 @@ 
#pragma once +#include #include #include +#include #include #include #include @@ -113,6 +115,12 @@ namespace gtsam { virtual Ptr apply_g_op_fC(const Choice&, const Binary&) const = 0; virtual Ptr choose(const L& label, size_t index) const = 0; virtual bool isLeaf() const = 0; + + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) {} }; /** ------------------------ Node base class --------------------------- */ @@ -236,7 +244,7 @@ namespace gtsam { /** * @brief Visit all leaves in depth-first fashion. * - * @param f (side-effect) Function taking a value. + * @param f (side-effect) Function taking the value of the leaf node. * * @note Due to pruning, the number of leaves may not be the same as the * number of assignments. E.g. if we have a tree on 2 binary variables with @@ -245,7 +253,7 @@ namespace gtsam { * Example: * int sum = 0; * auto visitor = [&](int y) { sum += y; }; - * tree.visitWith(visitor); + * tree.visit(visitor); */ template void visit(Func f) const; @@ -261,8 +269,8 @@ namespace gtsam { * * Example: * int sum = 0; - * auto visitor = [&](int y) { sum += y; }; - * tree.visitWith(visitor); + * auto visitor = [&](const Leaf& leaf) { sum += leaf.constant(); }; + * tree.visitLeaf(visitor); */ template void visitLeaf(Func f) const; @@ -364,8 +372,19 @@ namespace gtsam { compose(Iterator begin, Iterator end, const L& label) const; /// @} + + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar& BOOST_SERIALIZATION_NVP(root_); + } }; // DecisionTree + template + struct traits> : public Testable> {}; + /** free versions of apply */ /// Apply unary operator `op` to DecisionTree `f`. 
diff --git a/gtsam/discrete/DecisionTreeFactor.cpp b/gtsam/discrete/DecisionTreeFactor.cpp index 4e16fc689e..14a24b6e62 100644 --- a/gtsam/discrete/DecisionTreeFactor.cpp +++ b/gtsam/discrete/DecisionTreeFactor.cpp @@ -18,6 +18,7 @@ */ #include +#include #include #include @@ -56,6 +57,16 @@ namespace gtsam { } } + /* ************************************************************************ */ + double DecisionTreeFactor::error(const DiscreteValues& values) const { + return -std::log(evaluate(values)); + } + + /* ************************************************************************ */ + double DecisionTreeFactor::error(const HybridValues& values) const { + return error(values.discrete()); + } + /* ************************************************************************ */ double DecisionTreeFactor::safe_div(const double& a, const double& b) { // The use for safe_div is when we divide the product factor by the sum @@ -156,9 +167,9 @@ namespace gtsam { std::vector> DecisionTreeFactor::enumerate() const { // Get all possible assignments - std::vector> pairs = discreteKeys(); + DiscreteKeys pairs = discreteKeys(); // Reverse to make cartesian product output a more natural ordering. - std::vector> rpairs(pairs.rbegin(), pairs.rend()); + DiscreteKeys rpairs(pairs.rbegin(), pairs.rend()); const auto assignments = DiscreteValues::CartesianProduct(rpairs); // Construct unordered_map with values diff --git a/gtsam/discrete/DecisionTreeFactor.h b/gtsam/discrete/DecisionTreeFactor.h index bb9ddbd961..dd292cae8a 100644 --- a/gtsam/discrete/DecisionTreeFactor.h +++ b/gtsam/discrete/DecisionTreeFactor.h @@ -34,6 +34,7 @@ namespace gtsam { class DiscreteConditional; + class HybridValues; /** * A discrete probabilistic factor. @@ -97,11 +98,20 @@ namespace gtsam { /// @name Standard Interface /// @{ - /// Value is just look up in AlgebraicDecisonTree + /// Calculate probability for given values `x`, + /// is just look up in AlgebraicDecisionTree. 
+ double evaluate(const DiscreteValues& values) const { + return ADT::operator()(values); + } + + /// Evaluate probability density, sugar. double operator()(const DiscreteValues& values) const override { return ADT::operator()(values); } + /// Calculate error for DiscreteValues `x`, is -log(probability). + double error(const DiscreteValues& values) const; + /// multiply two factors DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override { return apply(f, ADT::Ring::mul); @@ -230,7 +240,27 @@ namespace gtsam { std::string html(const KeyFormatter& keyFormatter = DefaultKeyFormatter, const Names& names = {}) const override; - /// @} + /// @} + /// @name HybridValues methods. + /// @{ + + /** + * Calculate error for HybridValues `x`, is -log(probability) + * Simply dispatches to DiscreteValues version. + */ + double error(const HybridValues& values) const override; + + /// @} + + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); + ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(ADT); + ar& BOOST_SERIALIZATION_NVP(cardinalities_); + } }; // traits diff --git a/gtsam/discrete/DiscreteBayesNet.cpp b/gtsam/discrete/DiscreteBayesNet.cpp index ccc52585e6..7cd190cc26 100644 --- a/gtsam/discrete/DiscreteBayesNet.cpp +++ b/gtsam/discrete/DiscreteBayesNet.cpp @@ -33,6 +33,15 @@ bool DiscreteBayesNet::equals(const This& bn, double tol) const { return Base::equals(bn, tol); } +/* ************************************************************************* */ +double DiscreteBayesNet::logProbability(const DiscreteValues& values) const { + // evaluate all conditionals and add + double result = 0.0; + for (const DiscreteConditional::shared_ptr& conditional : *this) + result += conditional->logProbability(values); + return result; +} + /* ************************************************************************* */ double 
DiscreteBayesNet::evaluate(const DiscreteValues& values) const { // evaluate all conditionals and multiply diff --git a/gtsam/discrete/DiscreteBayesNet.h b/gtsam/discrete/DiscreteBayesNet.h index 8ded827ceb..700f81d7e5 100644 --- a/gtsam/discrete/DiscreteBayesNet.h +++ b/gtsam/discrete/DiscreteBayesNet.h @@ -103,6 +103,9 @@ class GTSAM_EXPORT DiscreteBayesNet: public BayesNet { return evaluate(values); } + //** log(evaluate(values)) for given DiscreteValues */ + double logProbability(const DiscreteValues & values) const; + /** * @brief do ancestral sampling * @@ -136,7 +139,15 @@ class GTSAM_EXPORT DiscreteBayesNet: public BayesNet { std::string html(const KeyFormatter& keyFormatter = DefaultKeyFormatter, const DiscreteFactor::Names& names = {}) const; - ///@} + /// @} + /// @name HybridValues methods. + /// @{ + + using Base::error; // Expose error(const HybridValues&) method.. + using Base::evaluate; // Expose evaluate(const HybridValues&) method.. + using Base::logProbability; // Expose logProbability(const HybridValues&) + + /// @} #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 /// @name Deprecated functionality diff --git a/gtsam/discrete/DiscreteConditional.cpp b/gtsam/discrete/DiscreteConditional.cpp index 0d6c5e976f..214bc64da2 100644 --- a/gtsam/discrete/DiscreteConditional.cpp +++ b/gtsam/discrete/DiscreteConditional.cpp @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -510,6 +510,10 @@ string DiscreteConditional::html(const KeyFormatter& keyFormatter, return ss.str(); } +/* ************************************************************************* */ +double DiscreteConditional::evaluate(const HybridValues& x) const{ + return this->evaluate(x.discrete()); +} /* ************************************************************************* */ } // namespace gtsam diff --git a/gtsam/discrete/DiscreteConditional.h b/gtsam/discrete/DiscreteConditional.h index 6a286633d3..a4e1f011b7 100644 --- a/gtsam/discrete/DiscreteConditional.h +++ 
b/gtsam/discrete/DiscreteConditional.h @@ -18,9 +18,9 @@ #pragma once +#include #include #include -#include #include #include @@ -147,6 +147,11 @@ class GTSAM_EXPORT DiscreteConditional /// @name Standard Interface /// @{ + /// Log-probability is just -error(x). + double logProbability(const DiscreteValues& x) const { + return -error(x); + } + /// print index signature only void printSignature( const std::string& s = "Discrete Conditional: ", @@ -155,10 +160,13 @@ class GTSAM_EXPORT DiscreteConditional } /// Evaluate, just look up in AlgebraicDecisonTree - double operator()(const DiscreteValues& values) const override { + double evaluate(const DiscreteValues& values) const { return ADT::operator()(values); } + using DecisionTreeFactor::error; ///< DiscreteValues version + using DecisionTreeFactor::operator(); ///< DiscreteValues version + /** * @brief restrict to given *parent* values. * @@ -225,6 +233,34 @@ class GTSAM_EXPORT DiscreteConditional std::string html(const KeyFormatter& keyFormatter = DefaultKeyFormatter, const Names& names = {}) const override; + + /// @} + /// @name HybridValues methods. + /// @{ + + /** + * Calculate probability for HybridValues `x`. + * Dispatches to DiscreteValues version. + */ + double evaluate(const HybridValues& x) const override; + + using BaseConditional::operator(); ///< HybridValues version + + /** + * Calculate log-probability log(evaluate(x)) for HybridValues `x`. + * This is actually just -error(x). + */ + double logProbability(const HybridValues& x) const override { + return -error(x); + } + + /** + * logNormalizationConstant K is just zero, such that + * logProbability(x) = log(evaluate(x)) = - error(x) + * and hence error(x) = - log(evaluate(x)) > 0 for all x. 
+ */ + double logNormalizationConstant() const override { return 0.0; } + /// @} #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 @@ -239,6 +275,15 @@ class GTSAM_EXPORT DiscreteConditional /// Internal version of choose DiscreteConditional::ADT choose(const DiscreteValues& given, bool forceComplete) const; + + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseFactor); + ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseConditional); + } }; // DiscreteConditional diff --git a/gtsam/discrete/DiscreteFactor.cpp b/gtsam/discrete/DiscreteFactor.cpp index 08309e2e17..2b1bc36a3a 100644 --- a/gtsam/discrete/DiscreteFactor.cpp +++ b/gtsam/discrete/DiscreteFactor.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -27,6 +28,16 @@ using namespace std; namespace gtsam { +/* ************************************************************************* */ +double DiscreteFactor::error(const DiscreteValues& values) const { + return -std::log((*this)(values)); +} + +/* ************************************************************************* */ +double DiscreteFactor::error(const HybridValues& c) const { + return this->error(c.discrete()); +} + /* ************************************************************************* */ std::vector expNormalize(const std::vector& logProbs) { double maxLogProb = -std::numeric_limits::infinity(); diff --git a/gtsam/discrete/DiscreteFactor.h b/gtsam/discrete/DiscreteFactor.h index c6bb079fb5..712e1ff75b 100644 --- a/gtsam/discrete/DiscreteFactor.h +++ b/gtsam/discrete/DiscreteFactor.h @@ -27,6 +27,7 @@ namespace gtsam { class DecisionTreeFactor; class DiscreteConditional; +class HybridValues; /** * Base class for discrete probabilistic factors @@ -83,6 +84,15 @@ class GTSAM_EXPORT DiscreteFactor: public Factor { /// Find value for given assignment of values to variables virtual double 
operator()(const DiscreteValues&) const = 0; + /// Error is just -log(value) + double error(const DiscreteValues& values) const; + + /** + * The Factor::error simply extracts the \class DiscreteValues from the + * \class HybridValues and calculates the error. + */ + double error(const HybridValues& c) const override; + /// Multiply in a DecisionTreeFactor and return the result as DecisionTreeFactor virtual DecisionTreeFactor operator*(const DecisionTreeFactor&) const = 0; diff --git a/gtsam/discrete/DiscreteFactorGraph.h b/gtsam/discrete/DiscreteFactorGraph.h index 818eeda4e7..3fbb64d506 100644 --- a/gtsam/discrete/DiscreteFactorGraph.h +++ b/gtsam/discrete/DiscreteFactorGraph.h @@ -62,9 +62,17 @@ template<> struct EliminationTraits typedef DiscreteBayesTree BayesTreeType; ///< Type of Bayes tree typedef DiscreteJunctionTree JunctionTreeType; ///< Type of Junction tree /// The default dense elimination function - static std::pair, boost::shared_ptr > + static std::pair, + boost::shared_ptr > DefaultEliminate(const FactorGraphType& factors, const Ordering& keys) { - return EliminateDiscrete(factors, keys); } + return EliminateDiscrete(factors, keys); + } + /// The default ordering generation function + static Ordering DefaultOrderingFunc( + const FactorGraphType& graph, + boost::optional variableIndex) { + return Ordering::Colamd(*variableIndex); + } }; /* ************************************************************************* */ @@ -214,6 +222,12 @@ class GTSAM_EXPORT DiscreteFactorGraph std::string html(const KeyFormatter& keyFormatter = DefaultKeyFormatter, const DiscreteFactor::Names& names = {}) const; + /// @} + /// @name HybridValues methods. + /// @{ + + using Base::error; // Expose error(const HybridValues&) method.. 
+ /// @} }; // \ DiscreteFactorGraph diff --git a/gtsam/discrete/DiscreteValues.cpp b/gtsam/discrete/DiscreteValues.cpp index 5d0c8dd3d5..b0427e91b7 100644 --- a/gtsam/discrete/DiscreteValues.cpp +++ b/gtsam/discrete/DiscreteValues.cpp @@ -17,6 +17,7 @@ #include +#include #include using std::cout; @@ -26,6 +27,7 @@ using std::stringstream; namespace gtsam { +/* ************************************************************************ */ void DiscreteValues::print(const string& s, const KeyFormatter& keyFormatter) const { cout << s << ": "; @@ -34,6 +36,44 @@ void DiscreteValues::print(const string& s, cout << endl; } +/* ************************************************************************ */ +bool DiscreteValues::equals(const DiscreteValues& x, double tol) const { + if (this->size() != x.size()) return false; + for (const auto values : boost::combine(*this, x)) { + if (values.get<0>() != values.get<1>()) return false; + } + return true; +} + +/* ************************************************************************ */ +DiscreteValues& DiscreteValues::insert(const DiscreteValues& values) { + for (const auto& kv : values) { + if (count(kv.first)) { + throw std::out_of_range( + "Requested to insert a DiscreteValues into another DiscreteValues " + "that already contains one or more of its keys."); + } else { + this->emplace(kv); + } + } + return *this; +} + +/* ************************************************************************ */ +DiscreteValues& DiscreteValues::update(const DiscreteValues& values) { + for (const auto& kv : values) { + if (!count(kv.first)) { + throw std::out_of_range( + "Requested to update a DiscreteValues with another DiscreteValues " + "that contains keys not present in the first."); + } else { + (*this)[kv.first] = kv.second; + } + } + return *this; +} + +/* ************************************************************************ */ string DiscreteValues::Translate(const Names& names, Key key, size_t index) { if (names.empty()) { 
stringstream ss; @@ -60,6 +100,7 @@ string DiscreteValues::markdown(const KeyFormatter& keyFormatter, return ss.str(); } +/* ************************************************************************ */ string DiscreteValues::html(const KeyFormatter& keyFormatter, const Names& names) const { stringstream ss; @@ -84,6 +125,7 @@ string DiscreteValues::html(const KeyFormatter& keyFormatter, return ss.str(); } +/* ************************************************************************ */ string markdown(const DiscreteValues& values, const KeyFormatter& keyFormatter, const DiscreteValues::Names& names) { return values.markdown(keyFormatter, names); diff --git a/gtsam/discrete/DiscreteValues.h b/gtsam/discrete/DiscreteValues.h index 72bb240816..8a6d6f9303 100644 --- a/gtsam/discrete/DiscreteValues.h +++ b/gtsam/discrete/DiscreteValues.h @@ -27,21 +27,16 @@ namespace gtsam { -/** A map from keys to values - * TODO(dellaert): Do we need this? Should we just use gtsam::DiscreteValues? - * We just need another special DiscreteValue to represent labels, - * However, all other Lie's operators are undefined in this class. - * The good thing is we can have a Hybrid graph of discrete/continuous variables - * together.. - * Another good thing is we don't need to have the special DiscreteKey which - * stores cardinality of a Discrete variable. It should be handled naturally in - * the new class DiscreteValue, as the variable's type (domain) +/** + * A map from keys to values * @ingroup discrete */ class GTSAM_EXPORT DiscreteValues : public Assignment { public: using Base = Assignment; // base class + /// @name Standard Constructors + /// @{ using Assignment::Assignment; // all constructors // Define the implicit default constructor. @@ -50,14 +45,49 @@ class GTSAM_EXPORT DiscreteValues : public Assignment { // Construct from assignment. explicit DiscreteValues(const Base& a) : Base(a) {} + // Construct from initializer list. 
+ DiscreteValues(std::initializer_list> init) + : Assignment{init} {} + + /// @} + /// @name Testable + /// @{ + + /// print required by Testable. void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + /// equals required by Testable for unit testing. + bool equals(const DiscreteValues& x, double tol = 1e-9) const; + + /// @} + /// @name Standard Interface + /// @{ + + // insert in base class; + std::pair insert( const value_type& value ){ + return Base::insert(value); + } + + /** Insert all values from \c values. Throws an invalid_argument exception if + * any keys to be inserted are already used. */ + DiscreteValues& insert(const DiscreteValues& values); + + /** For all key/value pairs in \c values, replace values with corresponding + * keys in this object with those in \c values. Throws std::out_of_range if + * any keys in \c values are not present in this object. */ + DiscreteValues& update(const DiscreteValues& values); + + /** + * @brief Return a vector of DiscreteValues, one for each possible + * combination of values. 
+ */ static std::vector CartesianProduct( const DiscreteKeys& keys) { return Base::CartesianProduct(keys); } + /// @} /// @name Wrapper support /// @{ diff --git a/gtsam/discrete/discrete.i b/gtsam/discrete/discrete.i index fa98f36fa5..78efd57e28 100644 --- a/gtsam/discrete/discrete.i +++ b/gtsam/discrete/discrete.i @@ -82,6 +82,7 @@ virtual class DecisionTreeFactor : gtsam::DiscreteFactor { }; #include +#include virtual class DiscreteConditional : gtsam::DecisionTreeFactor { DiscreteConditional(); DiscreteConditional(size_t nFrontals, const gtsam::DecisionTreeFactor& f); @@ -95,6 +96,12 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor { DiscreteConditional(const gtsam::DecisionTreeFactor& joint, const gtsam::DecisionTreeFactor& marginal, const gtsam::Ordering& orderedKeys); + + // Standard interface + double logNormalizationConstant() const; + double logProbability(const gtsam::DiscreteValues& values) const; + double evaluate(const gtsam::DiscreteValues& values) const; + double error(const gtsam::DiscreteValues& values) const; gtsam::DiscreteConditional operator*( const gtsam::DiscreteConditional& other) const; gtsam::DiscreteConditional marginal(gtsam::Key key) const; @@ -116,6 +123,8 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor { size_t sample(size_t value) const; size_t sample() const; void sampleInPlace(gtsam::DiscreteValues @parentsValues) const; + + // Markdown and HTML string markdown(const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; string markdown(const gtsam::KeyFormatter& keyFormatter, @@ -124,6 +133,11 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor { gtsam::DefaultKeyFormatter) const; string html(const gtsam::KeyFormatter& keyFormatter, std::map> names) const; + + // Expose HybridValues versions + double logProbability(const gtsam::HybridValues& x) const; + double evaluate(const gtsam::HybridValues& x) const; + double error(const gtsam::HybridValues& x) const; }; #include 
@@ -157,7 +171,12 @@ class DiscreteBayesNet { const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; bool equals(const gtsam::DiscreteBayesNet& other, double tol = 1e-9) const; + + // Standard interface. + double logProbability(const gtsam::DiscreteValues& values) const; + double evaluate(const gtsam::DiscreteValues& values) const; double operator()(const gtsam::DiscreteValues& values) const; + gtsam::DiscreteValues sample() const; gtsam::DiscreteValues sample(gtsam::DiscreteValues given) const; diff --git a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp index 6a3fb23884..e3f9d9dd2e 100644 --- a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp +++ b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp @@ -25,10 +25,7 @@ #include // for convert only #define DISABLE_TIMING -#include -#include #include -using namespace boost::assign; #include #include @@ -402,13 +399,9 @@ TEST(ADT, factor_graph) { /* ************************************************************************* */ // test equality TEST(ADT, equality_noparser) { - DiscreteKey A(0, 2), B(1, 2); - Signature::Table tableA, tableB; - Signature::Row rA, rB; - rA += 80, 20; - rB += 60, 40; - tableA += rA; - tableB += rB; + const DiscreteKey A(0, 2), B(1, 2); + const Signature::Row rA{80, 20}, rB{60, 40}; + const Signature::Table tableA{rA}, tableB{rB}; // Check straight equality ADT pA1 = create(A % tableA); @@ -523,9 +516,9 @@ TEST(ADT, elimination) { // normalize ADT actual = f1 / actualSum; - vector cpt; - cpt += 1.0 / 3, 2.0 / 3, 3.0 / 7, 4.0 / 7, 5.0 / 11, 6.0 / 11, // - 1.0 / 9, 8.0 / 9, 3.0 / 6, 3.0 / 6, 5.0 / 10, 5.0 / 10; + const vector cpt{ + 1.0 / 3, 2.0 / 3, 3.0 / 7, 4.0 / 7, 5.0 / 11, 6.0 / 11, // + 1.0 / 9, 8.0 / 9, 3.0 / 6, 3.0 / 6, 5.0 / 10, 5.0 / 10}; ADT expected(A & B & C, cpt); CHECK(assert_equal(expected, actual)); } @@ -538,9 +531,9 @@ TEST(ADT, elimination) { // normalize ADT actual = f1 / actualSum; - vector 
cpt; - cpt += 1.0 / 21, 2.0 / 21, 3.0 / 21, 4.0 / 21, 5.0 / 21, 6.0 / 21, // - 1.0 / 25, 8.0 / 25, 3.0 / 25, 3.0 / 25, 5.0 / 25, 5.0 / 25; + const vector cpt{ + 1.0 / 21, 2.0 / 21, 3.0 / 21, 4.0 / 21, 5.0 / 21, 6.0 / 21, // + 1.0 / 25, 8.0 / 25, 3.0 / 25, 3.0 / 25, 5.0 / 25, 5.0 / 25}; ADT expected(A & B & C, cpt); CHECK(assert_equal(expected, actual)); } diff --git a/gtsam/discrete/tests/testDecisionTree.cpp b/gtsam/discrete/tests/testDecisionTree.cpp index 5ccbcf9162..2f385263c1 100644 --- a/gtsam/discrete/tests/testDecisionTree.cpp +++ b/gtsam/discrete/tests/testDecisionTree.cpp @@ -20,17 +20,15 @@ // #define DT_DEBUG_MEMORY // #define GTSAM_DT_NO_PRUNING #define DISABLE_DOT -#include - +#include #include +#include +#include #include -#include - -#include -using namespace boost::assign; - -using namespace std; +using std::vector; +using std::string; +using std::map; using namespace gtsam; template @@ -284,8 +282,7 @@ TEST(DecisionTree, Compose) { DT f1(B, DT(A, 0, 1), DT(A, 2, 3)); // Create from string - vector keys; - keys += DT::LabelC(A, 2), DT::LabelC(B, 2); + vector keys{DT::LabelC(A, 2), DT::LabelC(B, 2)}; DT f2(keys, "0 2 1 3"); EXPECT(assert_equal(f2, f1, 1e-9)); @@ -295,7 +292,7 @@ TEST(DecisionTree, Compose) { DOT(f4); // a bigger tree - keys += DT::LabelC(C, 2); + keys.push_back(DT::LabelC(C, 2)); DT f5(keys, "0 4 2 6 1 5 3 7"); EXPECT(assert_equal(f5, f4, 1e-9)); DOT(f5); @@ -326,7 +323,7 @@ TEST(DecisionTree, Containers) { /* ************************************************************************** */ // Test nrAssignments. TEST(DecisionTree, NrAssignments) { - pair A("A", 2), B("B", 2), C("C", 2); + const std::pair A("A", 2), B("B", 2), C("C", 2); DT tree({A, B, C}, "1 1 1 1 1 1 1 1"); EXPECT(tree.root_->isLeaf()); auto leaf = boost::dynamic_pointer_cast(tree.root_); @@ -476,8 +473,8 @@ TEST(DecisionTree, unzip) { // Test thresholding. 
TEST(DecisionTree, threshold) { // Create three level tree - vector keys; - keys += DT::LabelC("C", 2), DT::LabelC("B", 2), DT::LabelC("A", 2); + const vector keys{DT::LabelC("C", 2), DT::LabelC("B", 2), + DT::LabelC("A", 2)}; DT tree(keys, "0 1 2 3 4 5 6 7"); // Check number of leaves equal to zero @@ -499,8 +496,8 @@ TEST(DecisionTree, threshold) { // Test apply with assignment. TEST(DecisionTree, ApplyWithAssignment) { // Create three level tree - vector keys; - keys += DT::LabelC("C", 2), DT::LabelC("B", 2), DT::LabelC("A", 2); + const vector keys{DT::LabelC("C", 2), DT::LabelC("B", 2), + DT::LabelC("A", 2)}; DT tree(keys, "1 2 3 4 5 6 7 8"); DecisionTree probTree( diff --git a/gtsam/discrete/tests/testDecisionTreeFactor.cpp b/gtsam/discrete/tests/testDecisionTreeFactor.cpp index 84e45a0f54..3dbb3e64f3 100644 --- a/gtsam/discrete/tests/testDecisionTreeFactor.cpp +++ b/gtsam/discrete/tests/testDecisionTreeFactor.cpp @@ -19,13 +19,11 @@ #include #include +#include #include #include #include -#include -using namespace boost::assign; - using namespace std; using namespace gtsam; @@ -50,6 +48,9 @@ TEST( DecisionTreeFactor, constructors) EXPECT_DOUBLES_EQUAL(8, f1(values), 1e-9); EXPECT_DOUBLES_EQUAL(7, f2(values), 1e-9); EXPECT_DOUBLES_EQUAL(75, f3(values), 1e-9); + + // Assert that error = -log(value) + EXPECT_DOUBLES_EQUAL(-log(f1(values)), f1.error(values), 1e-9); } /* ************************************************************************* */ diff --git a/gtsam/discrete/tests/testDiscreteBayesNet.cpp b/gtsam/discrete/tests/testDiscreteBayesNet.cpp index 19af676f7d..0f1fb23cdd 100644 --- a/gtsam/discrete/tests/testDiscreteBayesNet.cpp +++ b/gtsam/discrete/tests/testDiscreteBayesNet.cpp @@ -25,12 +25,6 @@ #include - -#include -#include - -using namespace boost::assign; - #include #include #include @@ -106,6 +100,11 @@ TEST(DiscreteBayesNet, Asia) { DiscreteConditional expected2(Bronchitis % "11/9"); EXPECT(assert_equal(expected2, *chordal->back())); + // Check 
evaluate and logProbability + auto result = fg.optimize(); + EXPECT_DOUBLES_EQUAL(asia.logProbability(result), + std::log(asia.evaluate(result)), 1e-9); + // add evidence, we were in Asia and we have dyspnea fg.add(Asia, "0 1"); fg.add(Dyspnea, "0 1"); @@ -115,11 +114,11 @@ TEST(DiscreteBayesNet, Asia) { EXPECT(assert_equal(expected2, *chordal->back())); // now sample from it - DiscreteValues expectedSample; + DiscreteValues expectedSample{{Asia.first, 1}, {Dyspnea.first, 1}, + {XRay.first, 1}, {Tuberculosis.first, 0}, + {Smoking.first, 1}, {Either.first, 1}, + {LungCancer.first, 1}, {Bronchitis.first, 0}}; SETDEBUG("DiscreteConditional::sample", false); - insert(expectedSample)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 1)( - Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 1)( - LungCancer.first, 1)(Bronchitis.first, 0); auto actualSample = chordal2->sample(); EXPECT(assert_equal(expectedSample, actualSample)); } diff --git a/gtsam/discrete/tests/testDiscreteBayesTree.cpp b/gtsam/discrete/tests/testDiscreteBayesTree.cpp index ab69e82d77..0a7dc72f42 100644 --- a/gtsam/discrete/tests/testDiscreteBayesTree.cpp +++ b/gtsam/discrete/tests/testDiscreteBayesTree.cpp @@ -21,9 +21,6 @@ #include #include -#include -using namespace boost::assign; - #include #include diff --git a/gtsam/discrete/tests/testDiscreteConditional.cpp b/gtsam/discrete/tests/testDiscreteConditional.cpp index 13a34dd19d..6e73cfc6eb 100644 --- a/gtsam/discrete/tests/testDiscreteConditional.cpp +++ b/gtsam/discrete/tests/testDiscreteConditional.cpp @@ -17,16 +17,14 @@ * @date Feb 14, 2011 */ -#include -#include -#include -using namespace boost::assign; - #include +#include #include #include #include +#include + using namespace std; using namespace gtsam; @@ -55,12 +53,8 @@ TEST(DiscreteConditional, constructors) { TEST(DiscreteConditional, constructors_alt_interface) { DiscreteKey X(0, 2), Y(2, 3), Z(1, 2); // watch ordering ! 
- Signature::Table table; - Signature::Row r1, r2, r3; - r1 += 1.0, 1.0; - r2 += 2.0, 3.0; - r3 += 1.0, 4.0; - table += r1, r2, r3; + const Signature::Row r1{1, 1}, r2{2, 3}, r3{1, 4}; + const Signature::Table table{r1, r2, r3}; DiscreteConditional actual1(X, {Y}, table); DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8"); @@ -94,6 +88,31 @@ TEST(DiscreteConditional, constructors3) { EXPECT(assert_equal(expected, static_cast(actual))); } +/* ****************************************************************************/ +// Test evaluate for a discrete Prior P(Asia). +TEST(DiscreteConditional, PriorProbability) { + constexpr Key asiaKey = 0; + const DiscreteKey Asia(asiaKey, 2); + DiscreteConditional dc(Asia, "4/6"); + DiscreteValues values{{asiaKey, 0}}; + EXPECT_DOUBLES_EQUAL(0.4, dc.evaluate(values), 1e-9); + EXPECT(DiscreteConditional::CheckInvariants(dc, values)); +} + +/* ************************************************************************* */ +// Check that error, logProbability, evaluate all work as expected. 
+TEST(DiscreteConditional, probability) { + DiscreteKey C(2, 2), D(4, 2), E(3, 2); + DiscreteConditional C_given_DE((C | D, E) = "4/1 1/1 1/1 1/4"); + + DiscreteValues given {{C.first, 1}, {D.first, 0}, {E.first, 0}}; + EXPECT_DOUBLES_EQUAL(0.2, C_given_DE.evaluate(given), 1e-9); + EXPECT_DOUBLES_EQUAL(0.2, C_given_DE(given), 1e-9); + EXPECT_DOUBLES_EQUAL(log(0.2), C_given_DE.logProbability(given), 1e-9); + EXPECT_DOUBLES_EQUAL(-log(0.2), C_given_DE.error(given), 1e-9); + EXPECT(DiscreteConditional::CheckInvariants(C_given_DE, given)); +} + /* ************************************************************************* */ // Check calculation of joint P(A,B) TEST(DiscreteConditional, Multiply) { @@ -212,7 +231,6 @@ TEST(DiscreteConditional, marginals2) { DiscreteConditional conditional(A | B = "2/2 3/1"); DiscreteConditional prior(B % "1/2"); DiscreteConditional pAB = prior * conditional; - GTSAM_PRINT(pAB); // P(A=0) = P(A=0|B=0)P(B=0) + P(A=0|B=1)P(B=1) = 2*1 + 3*2 = 8 // P(A=1) = P(A=1|B=0)P(B=0) + P(A=1|B=1)P(B=1) = 2*1 + 1*2 = 4 DiscreteConditional actualA = pAB.marginal(A.first); diff --git a/gtsam/discrete/tests/testDiscreteFactor.cpp b/gtsam/discrete/tests/testDiscreteFactor.cpp index db0491c9d6..7fc5ee248b 100644 --- a/gtsam/discrete/tests/testDiscreteFactor.cpp +++ b/gtsam/discrete/tests/testDiscreteFactor.cpp @@ -21,9 +21,6 @@ #include #include -#include -using namespace boost::assign; - using namespace std; using namespace gtsam; using namespace gtsam::serializationTestHelpers; diff --git a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp index 3d9621affa..2d79b9de9e 100644 --- a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp +++ b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp @@ -23,9 +23,6 @@ #include -#include -using namespace boost::assign; - using namespace std; using namespace gtsam; @@ -49,9 +46,7 @@ TEST_UNSAFE(DiscreteFactorGraph, debugScheduler) { // Check MPE. 
auto actualMPE = graph.optimize(); - DiscreteValues mpe; - insert(mpe)(0, 2)(1, 1)(2, 0)(3, 0); - EXPECT(assert_equal(mpe, actualMPE)); + EXPECT(assert_equal({{0, 2}, {1, 1}, {2, 0}, {3, 0}}, actualMPE)); } /* ************************************************************************* */ @@ -149,8 +144,7 @@ TEST(DiscreteFactorGraph, test) { EXPECT(assert_equal(expectedBayesNet, *actual2)); // Test mpe - DiscreteValues mpe; - insert(mpe)(0, 0)(1, 0)(2, 0); + DiscreteValues mpe { {0, 0}, {1, 0}, {2, 0}}; auto actualMPE = graph.optimize(); EXPECT(assert_equal(mpe, actualMPE)); EXPECT_DOUBLES_EQUAL(9, graph(mpe), 1e-5); // regression @@ -182,8 +176,7 @@ TEST_UNSAFE(DiscreteFactorGraph, testMaxProduct) { graph.add(C & B, "0.1 0.9 0.4 0.6"); // Created expected MPE - DiscreteValues mpe; - insert(mpe)(0, 0)(1, 1)(2, 1); + DiscreteValues mpe{{0, 0}, {1, 1}, {2, 1}}; // Do max-product with different orderings for (Ordering::OrderingType orderingType : @@ -209,8 +202,7 @@ TEST(DiscreteFactorGraph, marginalIsNotMPE) { bayesNet.add(A % "10/9"); // The expected MPE is A=1, B=1 - DiscreteValues mpe; - insert(mpe)(0, 1)(1, 1); + DiscreteValues mpe { {0, 1}, {1, 1} }; // Which we verify using max-product: DiscreteFactorGraph graph(bayesNet); @@ -240,8 +232,7 @@ TEST(DiscreteFactorGraph, testMPE_Darwiche09book_p244) { graph.add(T1 & T2 & A, "1 0 0 1 0 1 1 0"); graph.add(A, "1 0"); // evidence, A = yes (first choice in Darwiche) - DiscreteValues mpe; - insert(mpe)(4, 0)(2, 1)(3, 1)(0, 1)(1, 1); + DiscreteValues mpe { {0, 1}, {1, 1}, {2, 1}, {3, 1}, {4, 0}}; EXPECT_DOUBLES_EQUAL(0.33858, graph(mpe), 1e-5); // regression // You can check visually by printing product: // graph.product().print("Darwiche-product"); @@ -267,112 +258,6 @@ TEST(DiscreteFactorGraph, testMPE_Darwiche09book_p244) { EXPECT_LONGS_EQUAL(2, bayesTree->size()); } -#ifdef OLD - -/* ************************************************************************* */ -/** - * Key type for discrete conditionals - * Includes name 
and cardinality - */ -class Key2 { -private: -std::string wff_; -size_t cardinality_; -public: -/** Constructor, defaults to binary */ -Key2(const std::string& name, size_t cardinality = 2) : -wff_(name), cardinality_(cardinality) { -} -const std::string& name() const { - return wff_; -} - -/** provide streaming */ -friend std::ostream& operator <<(std::ostream &os, const Key2 &key); -}; - -struct Factor2 { -std::string wff_; -Factor2() : -wff_("@") { -} -Factor2(const std::string& s) : -wff_(s) { -} -Factor2(const Key2& key) : -wff_(key.name()) { -} - -friend std::ostream& operator <<(std::ostream &os, const Factor2 &f); -friend Factor2 operator -(const Key2& key); -}; - -std::ostream& operator <<(std::ostream &os, const Factor2 &f) { -os << f.wff_; -return os; -} - -/** negation */ -Factor2 operator -(const Key2& key) { -return Factor2("-" + key.name()); -} - -/** OR */ -Factor2 operator ||(const Factor2 &factor1, const Factor2 &factor2) { -return Factor2(std::string("(") + factor1.wff_ + " || " + factor2.wff_ + ")"); -} - -/** AND */ -Factor2 operator &&(const Factor2 &factor1, const Factor2 &factor2) { -return Factor2(std::string("(") + factor1.wff_ + " && " + factor2.wff_ + ")"); -} - -/** implies */ -Factor2 operator >>(const Factor2 &factor1, const Factor2 &factor2) { -return Factor2(std::string("(") + factor1.wff_ + " >> " + factor2.wff_ + ")"); -} - -struct Graph2: public std::list { - -/** Add a factor graph*/ -// void operator +=(const Graph2& graph) { -// for(const Factor2& f: graph) -// push_back(f); -// } -friend std::ostream& operator <<(std::ostream &os, const Graph2& graph); - -}; - -/** Add a factor */ -//Graph2 operator +=(Graph2& graph, const Factor2& factor) { -// graph.push_back(factor); -// return graph; -//} -std::ostream& operator <<(std::ostream &os, const Graph2& graph) { -for(const Factor2& f: graph) -os << f << endl; -return os; -} - -/* ************************************************************************* */ 
-TEST(DiscreteFactorGraph, Sugar) -{ -Key2 M("Mythical"), I("Immortal"), A("Mammal"), H("Horned"), G("Magical"); - -// Test this desired construction -Graph2 unicorns; -unicorns += M >> -A; -unicorns += (-M) >> (-I && A); -unicorns += (I || A) >> H; -unicorns += H >> G; - -// should be done by adapting boost::assign: -// unicorns += (-M) >> (-I && A), (I || A) >> H , H >> G; - -cout << unicorns; -} -#endif - /* ************************************************************************* */ TEST(DiscreteFactorGraph, Dot) { // Create Factor graph diff --git a/gtsam/discrete/tests/testDiscreteLookupDAG.cpp b/gtsam/discrete/tests/testDiscreteLookupDAG.cpp index 04b8597804..8c3ff5a1da 100644 --- a/gtsam/discrete/tests/testDiscreteLookupDAG.cpp +++ b/gtsam/discrete/tests/testDiscreteLookupDAG.cpp @@ -20,11 +20,7 @@ #include #include -#include -#include - using namespace gtsam; -using namespace boost::assign; /* ************************************************************************* */ TEST(DiscreteLookupDAG, argmax) { @@ -43,8 +39,7 @@ TEST(DiscreteLookupDAG, argmax) { dag.add(1, DiscreteKeys{A}, adtA); // The expected MPE is A=1, B=1 - DiscreteValues mpe; - insert(mpe)(0, 1)(1, 1); + DiscreteValues mpe{{0, 1}, {1, 1}}; // check: auto actualMPE = dag.argmax(); diff --git a/gtsam/discrete/tests/testDiscreteMarginals.cpp b/gtsam/discrete/tests/testDiscreteMarginals.cpp index 3208f81c53..ffca8a481a 100644 --- a/gtsam/discrete/tests/testDiscreteMarginals.cpp +++ b/gtsam/discrete/tests/testDiscreteMarginals.cpp @@ -19,9 +19,6 @@ #include -#include -using namespace boost::assign; - #include using namespace std; @@ -186,8 +183,7 @@ TEST_UNSAFE(DiscreteMarginals, truss2) { F[j] /= sum; // Marginals - vector table; - table += F[j], T[j]; + const vector table{F[j], T[j]}; DecisionTreeFactor expectedM(key[j], table); DiscreteFactor::shared_ptr actualM = marginals(j); EXPECT(assert_equal( diff --git a/gtsam/discrete/tests/testDiscreteValues.cpp 
b/gtsam/discrete/tests/testDiscreteValues.cpp index c8a1fa1680..47989481cf 100644 --- a/gtsam/discrete/tests/testDiscreteValues.cpp +++ b/gtsam/discrete/tests/testDiscreteValues.cpp @@ -21,18 +21,28 @@ #include #include -#include -using namespace boost::assign; - using namespace std; using namespace gtsam; +static const DiscreteValues kExample{{12, 1}, {5, 0}}; + +/* ************************************************************************* */ +// Check insert +TEST(DiscreteValues, Insert) { + EXPECT(assert_equal({{12, 1}, {5, 0}, {13, 2}}, + DiscreteValues(kExample).insert({{13, 2}}))); +} + +/* ************************************************************************* */ +// Check update. +TEST(DiscreteValues, Update) { + EXPECT(assert_equal({{12, 2}, {5, 0}}, + DiscreteValues(kExample).update({{12, 2}}))); +} + /* ************************************************************************* */ // Check markdown representation with a value formatter. TEST(DiscreteValues, markdownWithValueFormatter) { - DiscreteValues values; - values[12] = 1; // A - values[5] = 0; // B string expected = "|Variable|value|\n" "|:-:|:-:|\n" @@ -40,16 +50,13 @@ TEST(DiscreteValues, markdownWithValueFormatter) { "|A|One|\n"; auto keyFormatter = [](Key key) { return key == 12 ? "A" : "B"; }; DiscreteValues::Names names{{12, {"Zero", "One", "Two"}}, {5, {"-", "+"}}}; - string actual = values.markdown(keyFormatter, names); + string actual = kExample.markdown(keyFormatter, names); EXPECT(actual == expected); } /* ************************************************************************* */ // Check html representation with a value formatter. TEST(DiscreteValues, htmlWithValueFormatter) { - DiscreteValues values; - values[12] = 1; // A - values[5] = 0; // B string expected = "
\n" "\n" @@ -64,7 +71,7 @@ TEST(DiscreteValues, htmlWithValueFormatter) { ""; auto keyFormatter = [](Key key) { return key == 12 ? "A" : "B"; }; DiscreteValues::Names names{{12, {"Zero", "One", "Two"}}, {5, {"-", "+"}}}; - string actual = values.html(keyFormatter, names); + string actual = kExample.html(keyFormatter, names); EXPECT(actual == expected); } diff --git a/gtsam/discrete/tests/testSerializationDiscrete.cpp b/gtsam/discrete/tests/testSerializationDiscrete.cpp new file mode 100644 index 0000000000..df7df0b7ec --- /dev/null +++ b/gtsam/discrete/tests/testSerializationDiscrete.cpp @@ -0,0 +1,105 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * testSerializtionDiscrete.cpp + * + * @date January 2023 + * @author Varun Agrawal + */ + +#include +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +using Tree = gtsam::DecisionTree; + +BOOST_CLASS_EXPORT_GUID(Tree, "gtsam_DecisionTreeStringInt") +BOOST_CLASS_EXPORT_GUID(Tree::Leaf, "gtsam_DecisionTreeStringInt_Leaf") +BOOST_CLASS_EXPORT_GUID(Tree::Choice, "gtsam_DecisionTreeStringInt_Choice") + +BOOST_CLASS_EXPORT_GUID(DecisionTreeFactor, "gtsam_DecisionTreeFactor"); + +using ADT = AlgebraicDecisionTree; +BOOST_CLASS_EXPORT_GUID(ADT, "gtsam_AlgebraicDecisionTree"); +BOOST_CLASS_EXPORT_GUID(ADT::Leaf, "gtsam_AlgebraicDecisionTree_Leaf") +BOOST_CLASS_EXPORT_GUID(ADT::Choice, "gtsam_AlgebraicDecisionTree_Choice") + +/* ****************************************************************************/ +// Test DecisionTree serialization. 
+TEST(DiscreteSerialization, DecisionTree) { + Tree tree({{"A", 2}}, std::vector{1, 2}); + + using namespace serializationTestHelpers; + + // Object roundtrip + Tree outputObj = create(); + roundtrip(tree, outputObj); + EXPECT(tree.equals(outputObj)); + + // XML roundtrip + Tree outputXml = create(); + roundtripXML(tree, outputXml); + EXPECT(tree.equals(outputXml)); + + // Binary roundtrip + Tree outputBinary = create(); + roundtripBinary(tree, outputBinary); + EXPECT(tree.equals(outputBinary)); +} + +/* ************************************************************************* */ +// Check serialization for AlgebraicDecisionTree and the DecisionTreeFactor +TEST(DiscreteSerialization, DecisionTreeFactor) { + using namespace serializationTestHelpers; + + DiscreteKey A(1, 2), B(2, 2), C(3, 2); + + DecisionTreeFactor::ADT tree(A & B & C, "1 5 3 7 2 6 4 8"); + EXPECT(equalsObj(tree)); + EXPECT(equalsXML(tree)); + EXPECT(equalsBinary(tree)); + + DecisionTreeFactor f(A & B & C, "1 5 3 7 2 6 4 8"); + EXPECT(equalsObj(f)); + EXPECT(equalsXML(f)); + EXPECT(equalsBinary(f)); +} + +/* ************************************************************************* */ +// Check serialization for DiscreteConditional & DiscreteDistribution +TEST(DiscreteSerialization, DiscreteConditional) { + using namespace serializationTestHelpers; + + DiscreteKey A(Symbol('x', 1), 3); + DiscreteConditional conditional(A % "1/2/2"); + + EXPECT(equalsObj(conditional)); + EXPECT(equalsXML(conditional)); + EXPECT(equalsBinary(conditional)); + + DiscreteDistribution P(A % "3/2/1"); + EXPECT(equalsObj(P)); + EXPECT(equalsXML(P)); + EXPECT(equalsBinary(P)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/discrete/tests/testSignature.cpp b/gtsam/discrete/tests/testSignature.cpp index 
737bd8aef0..02e7a1d109 100644 --- a/gtsam/discrete/tests/testSignature.cpp +++ b/gtsam/discrete/tests/testSignature.cpp @@ -21,12 +21,10 @@ #include #include -#include #include using namespace std; using namespace gtsam; -using namespace boost::assign; DiscreteKey X(0, 2), Y(1, 3), Z(2, 2); @@ -57,12 +55,8 @@ TEST(testSignature, simple_conditional) { /* ************************************************************************* */ TEST(testSignature, simple_conditional_nonparser) { - Signature::Table table; - Signature::Row row1, row2, row3; - row1 += 1.0, 1.0; - row2 += 2.0, 3.0; - row3 += 1.0, 4.0; - table += row1, row2, row3; + Signature::Row row1{1, 1}, row2{2, 3}, row3{1, 4}; + Signature::Table table{row1, row2, row3}; Signature sig(X | Y = table); CHECK(sig.key() == X); diff --git a/gtsam/geometry/CameraSet.h b/gtsam/geometry/CameraSet.h index 143d4bc3c3..a814fea87e 100644 --- a/gtsam/geometry/CameraSet.h +++ b/gtsam/geometry/CameraSet.h @@ -18,12 +18,13 @@ #pragma once -#include -#include // for Cheirality exception -#include -#include #include +#include +#include +#include // for Cheirality exception +#include #include + #include namespace gtsam { @@ -31,10 +32,10 @@ namespace gtsam { /** * @brief A set of cameras, all with their own calibration */ -template -class CameraSet : public std::vector > { - -protected: +template +class CameraSet : public std::vector> { + protected: + using Base = std::vector>; /** * 2D measurement and noise model for each of the m views @@ -43,13 +44,11 @@ class CameraSet : public std::vector > typedef typename CAMERA::Measurement Z; typedef typename CAMERA::MeasurementVector ZVector; - static const int D = traits::dimension; ///< Camera dimension - static const int ZDim = traits::dimension; ///< Measurement dimension + static const int D = traits::dimension; ///< Camera dimension + static const int ZDim = traits::dimension; ///< Measurement dimension /// Make a vector of re-projection errors - static Vector ErrorVector(const 
ZVector& predicted, - const ZVector& measured) { - + static Vector ErrorVector(const ZVector& predicted, const ZVector& measured) { // Check size size_t m = predicted.size(); if (measured.size() != m) @@ -59,7 +58,8 @@ class CameraSet : public std::vector > Vector b(ZDim * m); for (size_t i = 0, row = 0; i < m; i++, row += ZDim) { Vector bi = traits::Local(measured[i], predicted[i]); - if(ZDim==3 && std::isnan(bi(1))){ // if it is a stereo point and the right pixel is missing (nan) + if (ZDim == 3 && std::isnan(bi(1))) { // if it is a stereo point and the + // right pixel is missing (nan) bi(1) = 0; } b.segment(row) = bi; @@ -67,7 +67,8 @@ class CameraSet : public std::vector > return b; } -public: + public: + using Base::Base; // Inherit the vector constructors /// Destructor virtual ~CameraSet() = default; @@ -83,18 +84,15 @@ class CameraSet : public std::vector > */ virtual void print(const std::string& s = "") const { std::cout << s << "CameraSet, cameras = \n"; - for (size_t k = 0; k < this->size(); ++k) - this->at(k).print(s); + for (size_t k = 0; k < this->size(); ++k) this->at(k).print(s); } /// equals bool equals(const CameraSet& p, double tol = 1e-9) const { - if (this->size() != p.size()) - return false; + if (this->size() != p.size()) return false; bool camerasAreEqual = true; for (size_t i = 0; i < this->size(); i++) { - if (this->at(i).equals(p.at(i), tol) == false) - camerasAreEqual = false; + if (this->at(i).equals(p.at(i), tol) == false) camerasAreEqual = false; break; } return camerasAreEqual; @@ -106,11 +104,10 @@ class CameraSet : public std::vector > * matrix this function returns the diagonal blocks. 
* throws CheiralityException */ - template - ZVector project2(const POINT& point, // - boost::optional Fs = boost::none, // - boost::optional E = boost::none) const { - + template + ZVector project2(const POINT& point, // + boost::optional Fs = boost::none, // + boost::optional E = boost::none) const { static const int N = FixedDimension::value; // Allocate result @@ -135,19 +132,19 @@ class CameraSet : public std::vector > } /// Calculate vector [project2(point)-z] of re-projection errors - template + template Vector reprojectionError(const POINT& point, const ZVector& measured, - boost::optional Fs = boost::none, // - boost::optional E = boost::none) const { + boost::optional Fs = boost::none, // + boost::optional E = boost::none) const { return ErrorVector(project2(point, Fs, E), measured); } /** - * Do Schur complement, given Jacobian as Fs,E,P, return SymmetricBlockMatrix - * G = F' * F - F' * E * P * E' * F - * g = F' * (b - E * P * E' * b) - * Fixed size version - */ + * Do Schur complement, given Jacobian as Fs,E,P, return SymmetricBlockMatrix + * G = F' * F - F' * E * P * E' * F + * g = F' * (b - E * P * E' * b) + * Fixed size version + */ template // N = 2 or 3 (point dimension), ND is the camera dimension static SymmetricBlockMatrix SchurComplement( @@ -158,38 +155,47 @@ class CameraSet : public std::vector > // a single point is observed in m cameras size_t m = Fs.size(); - // Create a SymmetricBlockMatrix (augmented hessian, with extra row/column with info vector) + // Create a SymmetricBlockMatrix (augmented hessian, with extra row/column + // with info vector) size_t M1 = ND * m + 1; - std::vector dims(m + 1); // this also includes the b term + std::vector dims(m + 1); // this also includes the b term std::fill(dims.begin(), dims.end() - 1, ND); dims.back() = 1; SymmetricBlockMatrix augmentedHessian(dims, Matrix::Zero(M1, M1)); // Blockwise Schur complement - for (size_t i = 0; i < m; i++) { // for each camera + for (size_t i = 0; i < m; i++) { // 
for each camera const Eigen::Matrix& Fi = Fs[i]; const auto FiT = Fi.transpose(); - const Eigen::Matrix Ei_P = // + const Eigen::Matrix Ei_P = // E.block(ZDim * i, 0, ZDim, N) * P; // D = (Dx2) * ZDim - augmentedHessian.setOffDiagonalBlock(i, m, FiT * b.segment(ZDim * i) // F' * b - - FiT * (Ei_P * (E.transpose() * b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1) + augmentedHessian.setOffDiagonalBlock( + i, m, + FiT * b.segment(ZDim * i) // F' * b + - + FiT * + (Ei_P * + (E.transpose() * + b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1) // (DxD) = (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) ) - augmentedHessian.setDiagonalBlock(i, FiT - * (Fi - Ei_P * E.block(ZDim * i, 0, ZDim, N).transpose() * Fi)); + augmentedHessian.setDiagonalBlock( + i, + FiT * (Fi - Ei_P * E.block(ZDim * i, 0, ZDim, N).transpose() * Fi)); // upper triangular part of the hessian - for (size_t j = i + 1; j < m; j++) { // for each camera + for (size_t j = i + 1; j < m; j++) { // for each camera const Eigen::Matrix& Fj = Fs[j]; // (DxD) = (Dx2) * ( (2x2) * (2xD) ) - augmentedHessian.setOffDiagonalBlock(i, j, -FiT - * (Ei_P * E.block(ZDim * j, 0, ZDim, N).transpose() * Fj)); + augmentedHessian.setOffDiagonalBlock( + i, j, + -FiT * (Ei_P * E.block(ZDim * j, 0, ZDim, N).transpose() * Fj)); } - } // end of for over cameras + } // end of for over cameras augmentedHessian.diagonalBlock(m)(0, 0) += b.squaredNorm(); return augmentedHessian; @@ -297,20 +303,21 @@ class CameraSet : public std::vector > * g = F' * (b - E * P * E' * b) * Fixed size version */ - template // N = 2 or 3 - static SymmetricBlockMatrix SchurComplement(const FBlocks& Fs, - const Matrix& E, const Eigen::Matrix& P, const Vector& b) { - return SchurComplement(Fs, E, P, b); + template // N = 2 or 3 + static SymmetricBlockMatrix SchurComplement( + const FBlocks& Fs, const Matrix& E, const Eigen::Matrix& P, + const Vector& b) { + return SchurComplement(Fs, E, P, b); } /// Computes Point Covariance P, with 
lambda parameter - template // N = 2 or 3 (point dimension) + template // N = 2 or 3 (point dimension) static void ComputePointCovariance(Eigen::Matrix& P, - const Matrix& E, double lambda, bool diagonalDamping = false) { - + const Matrix& E, double lambda, + bool diagonalDamping = false) { Matrix EtE = E.transpose() * E; - if (diagonalDamping) { // diagonal of the hessian + if (diagonalDamping) { // diagonal of the hessian EtE.diagonal() += lambda * EtE.diagonal(); } else { DenseIndex n = E.cols(); @@ -322,7 +329,7 @@ class CameraSet : public std::vector > /// Computes Point Covariance P, with lambda parameter, dynamic version static Matrix PointCov(const Matrix& E, const double lambda = 0.0, - bool diagonalDamping = false) { + bool diagonalDamping = false) { if (E.cols() == 2) { Matrix2 P2; ComputePointCovariance<2>(P2, E, lambda, diagonalDamping); @@ -339,8 +346,9 @@ class CameraSet : public std::vector > * Dynamic version */ static SymmetricBlockMatrix SchurComplement(const FBlocks& Fblocks, - const Matrix& E, const Vector& b, const double lambda = 0.0, - bool diagonalDamping = false) { + const Matrix& E, const Vector& b, + const double lambda = 0.0, + bool diagonalDamping = false) { if (E.cols() == 2) { Matrix2 P; ComputePointCovariance<2>(P, E, lambda, diagonalDamping); @@ -353,17 +361,17 @@ class CameraSet : public std::vector > } /** - * Applies Schur complement (exploiting block structure) to get a smart factor on cameras, - * and adds the contribution of the smart factor to a pre-allocated augmented Hessian. + * Applies Schur complement (exploiting block structure) to get a smart factor + * on cameras, and adds the contribution of the smart factor to a + * pre-allocated augmented Hessian. 
*/ - template // N = 2 or 3 (point dimension) - static void UpdateSchurComplement(const FBlocks& Fs, const Matrix& E, - const Eigen::Matrix& P, const Vector& b, - const KeyVector& allKeys, const KeyVector& keys, - /*output ->*/SymmetricBlockMatrix& augmentedHessian) { - - assert(keys.size()==Fs.size()); - assert(keys.size()<=allKeys.size()); + template // N = 2 or 3 (point dimension) + static void UpdateSchurComplement( + const FBlocks& Fs, const Matrix& E, const Eigen::Matrix& P, + const Vector& b, const KeyVector& allKeys, const KeyVector& keys, + /*output ->*/ SymmetricBlockMatrix& augmentedHessian) { + assert(keys.size() == Fs.size()); + assert(keys.size() <= allKeys.size()); FastMap KeySlotMap; for (size_t slot = 0; slot < allKeys.size(); slot++) @@ -374,39 +382,49 @@ class CameraSet : public std::vector > // g = F' * (b - E * P * E' * b) // a single point is observed in m cameras - size_t m = Fs.size(); // cameras observing current point - size_t M = (augmentedHessian.rows() - 1) / D; // all cameras in the group - assert(allKeys.size()==M); + size_t m = Fs.size(); // cameras observing current point + size_t M = (augmentedHessian.rows() - 1) / D; // all cameras in the group + assert(allKeys.size() == M); // Blockwise Schur complement - for (size_t i = 0; i < m; i++) { // for each camera in the current factor + for (size_t i = 0; i < m; i++) { // for each camera in the current factor const MatrixZD& Fi = Fs[i]; const auto FiT = Fi.transpose(); - const Eigen::Matrix Ei_P = E.template block( - ZDim * i, 0) * P; + const Eigen::Matrix Ei_P = + E.template block(ZDim * i, 0) * P; // D = (DxZDim) * (ZDim) // allKeys are the list of all camera keys in the group, e.g, (1,3,4,5,7) - // we should map those to a slot in the local (grouped) hessian (0,1,2,3,4) - // Key cameraKey_i = this->keys_[i]; + // we should map those to a slot in the local (grouped) hessian + // (0,1,2,3,4) Key cameraKey_i = this->keys_[i]; DenseIndex aug_i = KeySlotMap.at(keys[i]); // information 
vector - store previous vector // vectorBlock = augmentedHessian(aug_i, aug_m).knownOffDiagonal(); // add contribution of current factor - augmentedHessian.updateOffDiagonalBlock(aug_i, M, - FiT * b.segment(ZDim * i) // F' * b - - FiT * (Ei_P * (E.transpose() * b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1) + augmentedHessian.updateOffDiagonalBlock( + aug_i, M, + FiT * b.segment(ZDim * i) // F' * b + - + FiT * + (Ei_P * + (E.transpose() * + b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1) // (DxD) += (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) ) // add contribution of current factor - // TODO(gareth): Eigen doesn't let us pass the expression. Call eval() for now... - augmentedHessian.updateDiagonalBlock(aug_i, - ((FiT * (Fi - Ei_P * E.template block(ZDim * i, 0).transpose() * Fi))).eval()); + // TODO(gareth): Eigen doesn't let us pass the expression. Call eval() for + // now... + augmentedHessian.updateDiagonalBlock( + aug_i, + ((FiT * + (Fi - + Ei_P * E.template block(ZDim * i, 0).transpose() * Fi))) + .eval()); // upper triangular part of the hessian - for (size_t j = i + 1; j < m; j++) { // for each camera + for (size_t j = i + 1; j < m; j++) { // for each camera const MatrixZD& Fj = Fs[j]; DenseIndex aug_j = KeySlotMap.at(keys[j]); @@ -415,39 +433,38 @@ class CameraSet : public std::vector > // off diagonal block - store previous block // matrixBlock = augmentedHessian(aug_i, aug_j).knownOffDiagonal(); // add contribution of current factor - augmentedHessian.updateOffDiagonalBlock(aug_i, aug_j, - -FiT * (Ei_P * E.template block(ZDim * j, 0).transpose() * Fj)); + augmentedHessian.updateOffDiagonalBlock( + aug_i, aug_j, + -FiT * (Ei_P * E.template block(ZDim * j, 0).transpose() * + Fj)); } - } // end of for over cameras + } // end of for over cameras augmentedHessian.diagonalBlock(M)(0, 0) += b.squaredNorm(); } -private: - + private: /// Serialization function friend class boost::serialization::access; - template - void 
serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & (*this); + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar&(*this); } -public: + public: GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; -template +template const int CameraSet::D; -template +template const int CameraSet::ZDim; -template -struct traits > : public Testable > { -}; +template +struct traits> : public Testable> {}; -template -struct traits > : public Testable > { -}; +template +struct traits> : public Testable> {}; -} // \ namespace gtsam +} // namespace gtsam diff --git a/gtsam/geometry/OrientedPlane3.h b/gtsam/geometry/OrientedPlane3.h index 92d1673793..d0d912ee9a 100644 --- a/gtsam/geometry/OrientedPlane3.h +++ b/gtsam/geometry/OrientedPlane3.h @@ -125,12 +125,14 @@ class GTSAM_EXPORT OrientedPlane3 { } /// Return the normal - inline Unit3 normal() const { + inline Unit3 normal(OptionalJacobian<2, 3> H = boost::none) const { + if (H) *H << I_2x2, Z_2x1; return n_; } /// Return the perpendicular distance to the origin - inline double distance() const { + inline double distance(OptionalJacobian<1, 3> H = boost::none) const { + if (H) *H << 0,0,1; return d_; } }; diff --git a/gtsam/geometry/Point2.h b/gtsam/geometry/Point2.h index d8b6daca80..21b44ada57 100644 --- a/gtsam/geometry/Point2.h +++ b/gtsam/geometry/Point2.h @@ -45,7 +45,7 @@ typedef std::vector > Point2Vector; /// multiply with scalar inline Point2 operator*(double s, const Point2& p) { - return p * s; + return Point2(s * p.x(), s * p.y()); } /* diff --git a/gtsam/geometry/Pose3.cpp b/gtsam/geometry/Pose3.cpp index 2652fc0730..6a8c61560a 100644 --- a/gtsam/geometry/Pose3.cpp +++ b/gtsam/geometry/Pose3.cpp @@ -158,6 +158,12 @@ bool Pose3::equals(const Pose3& pose, double tol) const { return R_.equals(pose.R_, tol) && traits::Equals(t_, pose.t_, tol); } +/* ************************************************************************* */ +Pose3 Pose3::interpolateRt(const Pose3& T, double t) const { + return 
Pose3(interpolate(R_, T.R_, t), + interpolate(t_, T.t_, t)); +} + /* ************************************************************************* */ /** Modified from Murray94book version (which assumes w and v normalized?) */ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi) { diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index 80741e1c3a..678df8376f 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -129,10 +129,7 @@ class GTSAM_EXPORT Pose3: public LieGroup { * @param T End point of interpolation. * @param t A value in [0, 1]. */ - Pose3 interpolateRt(const Pose3& T, double t) const { - return Pose3(interpolate(R_, T.R_, t), - interpolate(t_, T.t_, t)); - } + Pose3 interpolateRt(const Pose3& T, double t) const; /// @} /// @name Lie Group diff --git a/gtsam/geometry/Unit3.cpp b/gtsam/geometry/Unit3.cpp index 6f70d840e3..17169a5f56 100644 --- a/gtsam/geometry/Unit3.cpp +++ b/gtsam/geometry/Unit3.cpp @@ -32,6 +32,14 @@ using namespace std; namespace gtsam { +/* ************************************************************************* */ +Unit3::Unit3(const Vector3& p) : p_(p.normalized()) {} + +Unit3::Unit3(double x, double y, double z) : p_(x, y, z) { p_.normalize(); } + +Unit3::Unit3(const Point2& p, double f) : p_(p.x(), p.y(), f) { + p_.normalize(); +} /* ************************************************************************* */ Unit3 Unit3::FromPoint3(const Point3& point, OptionalJacobian<2, 3> H) { // 3*3 Derivative of representation with respect to point is 3*3: diff --git a/gtsam/geometry/Unit3.h b/gtsam/geometry/Unit3.h index ebd5c63c94..2989159a35 100644 --- a/gtsam/geometry/Unit3.h +++ b/gtsam/geometry/Unit3.h @@ -67,21 +67,14 @@ class GTSAM_EXPORT Unit3 { } /// Construct from point - explicit Unit3(const Vector3& p) : - p_(p.normalized()) { - } + explicit Unit3(const Vector3& p); /// Construct from x,y,z - Unit3(double x, double y, double z) : - p_(x, y, z) { - p_.normalize(); - } + Unit3(double x, double 
y, double z); /// Construct from 2D point in plane at focal length f /// Unit3(p,1) can be viewed as normalized homogeneous coordinates of 2D point - explicit Unit3(const Point2& p, double f) : p_(p.x(), p.y(), f) { - p_.normalize(); - } + explicit Unit3(const Point2& p, double f); /// Copy constructor Unit3(const Unit3& u) { diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index 2171e18828..2f1810dbbf 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -340,6 +340,10 @@ class Rot3 { gtsam::Point3 rotate(const gtsam::Point3& p) const; gtsam::Point3 unrotate(const gtsam::Point3& p) const; + // Group action on Unit3 + gtsam::Unit3 rotate(const gtsam::Unit3& p) const; + gtsam::Unit3 unrotate(const gtsam::Unit3& p) const; + // Standard Interface static gtsam::Rot3 Expmap(Vector v); static Vector Logmap(const gtsam::Rot3& p); diff --git a/gtsam/geometry/tests/testOrientedPlane3.cpp b/gtsam/geometry/tests/testOrientedPlane3.cpp index 533041a2cf..41b152557b 100644 --- a/gtsam/geometry/tests/testOrientedPlane3.cpp +++ b/gtsam/geometry/tests/testOrientedPlane3.cpp @@ -20,12 +20,9 @@ #include #include #include -#include -using namespace boost::assign; using namespace std::placeholders; using namespace gtsam; -using namespace std; using boost::none; GTSAM_CONCEPT_TESTABLE_INST(OrientedPlane3) @@ -166,6 +163,48 @@ TEST(OrientedPlane3, jacobian_retract) { } } +//******************************************************************************* +TEST(OrientedPlane3, jacobian_normal) { + Matrix23 H_actual, H_expected; + OrientedPlane3 plane(-1, 0.1, 0.2, 5); + + std::function f = std::bind( + &OrientedPlane3::normal, std::placeholders::_1, boost::none); + + H_expected = numericalDerivative11(f, plane); + plane.normal(H_actual); + EXPECT(assert_equal(H_actual, H_expected, 1e-5)); +} + +//******************************************************************************* +TEST(OrientedPlane3, jacobian_distance) { + Matrix13 H_actual, H_expected; + 
OrientedPlane3 plane(-1, 0.1, 0.2, 5); + + std::function f = std::bind( + &OrientedPlane3::distance, std::placeholders::_1, boost::none); + + H_expected = numericalDerivative11(f, plane); + plane.distance(H_actual); + EXPECT(assert_equal(H_actual, H_expected, 1e-5)); +} + +//******************************************************************************* +TEST(OrientedPlane3, getMethodJacobians) { + OrientedPlane3 plane(-1, 0.1, 0.2, 5); + Matrix33 H_retract, H_getters; + Matrix23 H_normal; + Matrix13 H_distance; + + // confirm the getters are exactly on the tangent space + Vector3 v(0, 0, 0); + plane.retract(v, H_retract); + plane.normal(H_normal); + plane.distance(H_distance); + H_getters << H_normal, H_distance; + EXPECT(assert_equal(H_retract, H_getters, 1e-5)); +} + /* ************************************************************************* */ int main() { srand(time(nullptr)); diff --git a/gtsam/geometry/tests/testPose2.cpp b/gtsam/geometry/tests/testPose2.cpp index 88c00a2e97..44dc55a81d 100644 --- a/gtsam/geometry/tests/testPose2.cpp +++ b/gtsam/geometry/tests/testPose2.cpp @@ -23,12 +23,10 @@ #include #include -#include // for operator += #include #include #include -using namespace boost::assign; using namespace gtsam; using namespace std; @@ -749,11 +747,10 @@ namespace align_3 { TEST(Pose2, align_3) { using namespace align_3; - Point2Pairs ab_pairs; Point2Pair ab1(make_pair(a1, b1)); Point2Pair ab2(make_pair(a2, b2)); Point2Pair ab3(make_pair(a3, b3)); - ab_pairs += ab1, ab2, ab3; + const Point2Pairs ab_pairs{ab1, ab2, ab3}; boost::optional aTb = Pose2::Align(ab_pairs); EXPECT(assert_equal(expected, *aTb)); @@ -778,9 +775,7 @@ namespace { TEST(Pose2, align_4) { using namespace align_3; - Point2Vector as, bs; - as += a1, a2, a3; - bs += b3, b1, b2; // note in 3,1,2 order ! + Point2Vector as{a1, a2, a3}, bs{b3, b1, b2}; // note in 3,1,2 order ! 
Triangle t1; t1.i_=0; t1.j_=1; t1.k_=2; Triangle t2; t2.i_=1; t2.j_=2; t2.k_=0; diff --git a/gtsam/geometry/tests/testPose3.cpp b/gtsam/geometry/tests/testPose3.cpp index 78a4af799e..dd7da172ed 100644 --- a/gtsam/geometry/tests/testPose3.cpp +++ b/gtsam/geometry/tests/testPose3.cpp @@ -20,15 +20,13 @@ #include #include -#include // for operator += -using namespace boost::assign; -using namespace std::placeholders; #include #include using namespace std; using namespace gtsam; +using namespace std::placeholders; GTSAM_CONCEPT_TESTABLE_INST(Pose3) GTSAM_CONCEPT_LIE_INST(Pose3) @@ -809,11 +807,10 @@ TEST( Pose3, adjointMap) { TEST(Pose3, Align1) { Pose3 expected(Rot3(), Point3(10,10,0)); - vector correspondences; - Point3Pair ab1(make_pair(Point3(10,10,0), Point3(0,0,0))); - Point3Pair ab2(make_pair(Point3(30,20,0), Point3(20,10,0))); - Point3Pair ab3(make_pair(Point3(20,30,0), Point3(10,20,0))); - correspondences += ab1, ab2, ab3; + Point3Pair ab1(Point3(10,10,0), Point3(0,0,0)); + Point3Pair ab2(Point3(30,20,0), Point3(20,10,0)); + Point3Pair ab3(Point3(20,30,0), Point3(10,20,0)); + const vector correspondences{ab1, ab2, ab3}; boost::optional actual = Pose3::Align(correspondences); EXPECT(assert_equal(expected, *actual)); @@ -825,15 +822,12 @@ TEST(Pose3, Align2) { Rot3 R = Rot3::RzRyRx(0.3, 0.2, 0.1); Pose3 expected(R, t); - vector correspondences; Point3 p1(0,0,1), p2(10,0,2), p3(20,-10,30); Point3 q1 = expected.transformFrom(p1), q2 = expected.transformFrom(p2), q3 = expected.transformFrom(p3); - Point3Pair ab1(make_pair(q1, p1)); - Point3Pair ab2(make_pair(q2, p2)); - Point3Pair ab3(make_pair(q3, p3)); - correspondences += ab1, ab2, ab3; + const Point3Pair ab1{q1, p1}, ab2{q2, p2}, ab3{q3, p3}; + const vector correspondences{ab1, ab2, ab3}; boost::optional actual = Pose3::Align(correspondences); EXPECT(assert_equal(expected, *actual, 1e-5)); diff --git a/gtsam/geometry/tests/testTriangulation.cpp b/gtsam/geometry/tests/testTriangulation.cpp index 
bc843ad75d..95397d5b39 100644 --- a/gtsam/geometry/tests/testTriangulation.cpp +++ b/gtsam/geometry/tests/testTriangulation.cpp @@ -30,12 +30,8 @@ #include #include -#include -#include - using namespace std; using namespace gtsam; -using namespace boost::assign; // Some common constants @@ -51,34 +47,34 @@ static const PinholeCamera kCamera1(kPose1, *kSharedCal); static const Pose3 kPose2 = kPose1 * Pose3(Rot3(), Point3(1, 0, 0)); static const PinholeCamera kCamera2(kPose2, *kSharedCal); -// landmark ~5 meters infront of camera +static const std::vector kPoses = {kPose1, kPose2}; + + +// landmark ~5 meters in front of camera static const Point3 kLandmark(5, 0.5, 1.2); // 1. Project two landmarks into two cameras and triangulate static const Point2 kZ1 = kCamera1.project(kLandmark); static const Point2 kZ2 = kCamera2.project(kLandmark); +static const Point2Vector kMeasurements{kZ1, kZ2}; //****************************************************************************** // Simple test with a well-behaved two camera situation TEST(triangulation, twoPoses) { - vector poses; - Point2Vector measurements; - - poses += kPose1, kPose2; - measurements += kZ1, kZ2; + Point2Vector measurements = kMeasurements; double rank_tol = 1e-9; // 1. Test simple DLT, perfect in no noise situation bool optimize = false; boost::optional actual1 = // - triangulatePoint3(poses, kSharedCal, measurements, rank_tol, optimize); + triangulatePoint3(kPoses, kSharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(kLandmark, *actual1, 1e-7)); // 2. test with optimization on, same answer optimize = true; boost::optional actual2 = // - triangulatePoint3(poses, kSharedCal, measurements, rank_tol, optimize); + triangulatePoint3(kPoses, kSharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(kLandmark, *actual2, 1e-7)); // 3. 
Add some noise and try again: result should be ~ (4.995, @@ -87,13 +83,13 @@ TEST(triangulation, twoPoses) { measurements.at(1) += Point2(-0.2, 0.3); optimize = false; boost::optional actual3 = // - triangulatePoint3(poses, kSharedCal, measurements, rank_tol, optimize); + triangulatePoint3(kPoses, kSharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-4)); // 4. Now with optimization on optimize = true; boost::optional actual4 = // - triangulatePoint3(poses, kSharedCal, measurements, rank_tol, optimize); + triangulatePoint3(kPoses, kSharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4)); } @@ -102,7 +98,7 @@ TEST(triangulation, twoCamerasUsingLOST) { cameras.push_back(kCamera1); cameras.push_back(kCamera2); - Point2Vector measurements = {kZ1, kZ2}; + Point2Vector measurements = kMeasurements; SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-4); double rank_tol = 1e-9; @@ -175,25 +171,21 @@ TEST(triangulation, twoPosesCal3DS2) { Point2 z1Distorted = camera1Distorted.project(kLandmark); Point2 z2Distorted = camera2Distorted.project(kLandmark); - vector poses; - Point2Vector measurements; - - poses += kPose1, kPose2; - measurements += z1Distorted, z2Distorted; + Point2Vector measurements{z1Distorted, z2Distorted}; double rank_tol = 1e-9; // 1. Test simple DLT, perfect in no noise situation bool optimize = false; boost::optional actual1 = // - triangulatePoint3(poses, sharedDistortedCal, measurements, + triangulatePoint3(kPoses, sharedDistortedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(kLandmark, *actual1, 1e-7)); // 2. 
test with optimization on, same answer optimize = true; boost::optional actual2 = // - triangulatePoint3(poses, sharedDistortedCal, measurements, + triangulatePoint3(kPoses, sharedDistortedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(kLandmark, *actual2, 1e-7)); @@ -203,14 +195,14 @@ TEST(triangulation, twoPosesCal3DS2) { measurements.at(1) += Point2(-0.2, 0.3); optimize = false; boost::optional actual3 = // - triangulatePoint3(poses, sharedDistortedCal, measurements, + triangulatePoint3(kPoses, sharedDistortedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-3)); // 4. Now with optimization on optimize = true; boost::optional actual4 = // - triangulatePoint3(poses, sharedDistortedCal, measurements, + triangulatePoint3(kPoses, sharedDistortedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-3)); } @@ -232,25 +224,21 @@ TEST(triangulation, twoPosesFisheye) { Point2 z1Distorted = camera1Distorted.project(kLandmark); Point2 z2Distorted = camera2Distorted.project(kLandmark); - vector poses; - Point2Vector measurements; - - poses += kPose1, kPose2; - measurements += z1Distorted, z2Distorted; + Point2Vector measurements{z1Distorted, z2Distorted}; double rank_tol = 1e-9; // 1. Test simple DLT, perfect in no noise situation bool optimize = false; boost::optional actual1 = // - triangulatePoint3(poses, sharedDistortedCal, measurements, + triangulatePoint3(kPoses, sharedDistortedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(kLandmark, *actual1, 1e-7)); // 2. 
test with optimization on, same answer optimize = true; boost::optional actual2 = // - triangulatePoint3(poses, sharedDistortedCal, measurements, + triangulatePoint3(kPoses, sharedDistortedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(kLandmark, *actual2, 1e-7)); @@ -260,14 +248,14 @@ TEST(triangulation, twoPosesFisheye) { measurements.at(1) += Point2(-0.2, 0.3); optimize = false; boost::optional actual3 = // - triangulatePoint3(poses, sharedDistortedCal, measurements, + triangulatePoint3(kPoses, sharedDistortedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-3)); // 4. Now with optimization on optimize = true; boost::optional actual4 = // - triangulatePoint3(poses, sharedDistortedCal, measurements, + triangulatePoint3(kPoses, sharedDistortedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-3)); } @@ -284,17 +272,13 @@ TEST(triangulation, twoPosesBundler) { Point2 z1 = camera1.project(kLandmark); Point2 z2 = camera2.project(kLandmark); - vector poses; - Point2Vector measurements; - - poses += kPose1, kPose2; - measurements += z1, z2; + Point2Vector measurements{z1, z2}; bool optimize = true; double rank_tol = 1e-9; boost::optional actual = // - triangulatePoint3(poses, bundlerCal, measurements, rank_tol, + triangulatePoint3(kPoses, bundlerCal, measurements, rank_tol, optimize); EXPECT(assert_equal(kLandmark, *actual, 1e-7)); @@ -303,19 +287,15 @@ TEST(triangulation, twoPosesBundler) { measurements.at(1) += Point2(-0.2, 0.3); boost::optional actual2 = // - triangulatePoint3(poses, bundlerCal, measurements, rank_tol, + triangulatePoint3(kPoses, bundlerCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19847), *actual2, 1e-3)); } //****************************************************************************** TEST(triangulation, fourPoses) { - vector poses; - Point2Vector measurements; - - poses += kPose1, 
kPose2; - measurements += kZ1, kZ2; - + Pose3Vector poses = kPoses; + Point2Vector measurements = kMeasurements; boost::optional actual = triangulatePoint3(poses, kSharedCal, measurements); EXPECT(assert_equal(kLandmark, *actual, 1e-2)); @@ -334,8 +314,8 @@ TEST(triangulation, fourPoses) { PinholeCamera camera3(pose3, *kSharedCal); Point2 z3 = camera3.project(kLandmark); - poses += pose3; - measurements += z3 + Point2(0.1, -0.1); + poses.push_back(pose3); + measurements.push_back(z3 + Point2(0.1, -0.1)); boost::optional triangulated_3cameras = // triangulatePoint3(poses, kSharedCal, measurements); @@ -353,8 +333,8 @@ TEST(triangulation, fourPoses) { #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION CHECK_EXCEPTION(camera4.project(kLandmark), CheiralityException); - poses += pose4; - measurements += Point2(400, 400); + poses.push_back(pose4); + measurements.emplace_back(400, 400); CHECK_EXCEPTION(triangulatePoint3(poses, kSharedCal, measurements), TriangulationCheiralityException); @@ -368,10 +348,8 @@ TEST(triangulation, threePoses_robustNoiseModel) { PinholeCamera camera3(pose3, *kSharedCal); Point2 z3 = camera3.project(kLandmark); - vector poses; - Point2Vector measurements; - poses += kPose1, kPose2, pose3; - measurements += kZ1, kZ2, z3; + const vector poses{kPose1, kPose2, pose3}; + Point2Vector measurements{kZ1, kZ2, z3}; // noise free, so should give exactly the landmark boost::optional actual = @@ -410,10 +388,9 @@ TEST(triangulation, fourPoses_robustNoiseModel) { PinholeCamera camera3(pose3, *kSharedCal); Point2 z3 = camera3.project(kLandmark); - vector poses; - Point2Vector measurements; - poses += kPose1, kPose1, kPose2, pose3; // 2 measurements from pose 1 - measurements += kZ1, kZ1, kZ2, z3; + const vector poses{kPose1, kPose1, kPose2, pose3}; + // 2 measurements from pose 1: + Point2Vector measurements{kZ1, kZ1, kZ2, z3}; // noise free, so should give exactly the landmark boost::optional actual = @@ -463,11 +440,8 @@ TEST(triangulation, fourPoses_distinct_Ks) { 
Point2 z1 = camera1.project(kLandmark); Point2 z2 = camera2.project(kLandmark); - CameraSet> cameras; - Point2Vector measurements; - - cameras += camera1, camera2; - measurements += z1, z2; + CameraSet> cameras{camera1, camera2}; + Point2Vector measurements{z1, z2}; boost::optional actual = // triangulatePoint3(cameras, measurements); @@ -488,8 +462,8 @@ TEST(triangulation, fourPoses_distinct_Ks) { PinholeCamera camera3(pose3, K3); Point2 z3 = camera3.project(kLandmark); - cameras += camera3; - measurements += z3 + Point2(0.1, -0.1); + cameras.push_back(camera3); + measurements.push_back(z3 + Point2(0.1, -0.1)); boost::optional triangulated_3cameras = // triangulatePoint3(cameras, measurements); @@ -508,8 +482,8 @@ TEST(triangulation, fourPoses_distinct_Ks) { #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION CHECK_EXCEPTION(camera4.project(kLandmark), CheiralityException); - cameras += camera4; - measurements += Point2(400, 400); + cameras.push_back(camera4); + measurements.emplace_back(400, 400); CHECK_EXCEPTION(triangulatePoint3(cameras, measurements), TriangulationCheiralityException); #endif @@ -529,11 +503,8 @@ TEST(triangulation, fourPoses_distinct_Ks_distortion) { Point2 z1 = camera1.project(kLandmark); Point2 z2 = camera2.project(kLandmark); - CameraSet> cameras; - Point2Vector measurements; - - cameras += camera1, camera2; - measurements += z1, z2; + const CameraSet> cameras{camera1, camera2}; + const Point2Vector measurements{z1, z2}; boost::optional actual = // triangulatePoint3(cameras, measurements); @@ -554,11 +525,8 @@ TEST(triangulation, outliersAndFarLandmarks) { Point2 z1 = camera1.project(kLandmark); Point2 z2 = camera2.project(kLandmark); - CameraSet> cameras; - Point2Vector measurements; - - cameras += camera1, camera2; - measurements += z1, z2; + CameraSet> cameras{camera1, camera2}; + Point2Vector measurements{z1, z2}; double landmarkDistanceThreshold = 10; // landmark is closer than that TriangulationParameters params( @@ -582,8 +550,8 @@ 
TEST(triangulation, outliersAndFarLandmarks) { PinholeCamera camera3(pose3, K3); Point2 z3 = camera3.project(kLandmark); - cameras += camera3; - measurements += z3 + Point2(10, -10); + cameras.push_back(camera3); + measurements.push_back(z3 + Point2(10, -10)); landmarkDistanceThreshold = 10; // landmark is closer than that double outlierThreshold = 100; // loose, the outlier is going to pass @@ -608,11 +576,8 @@ TEST(triangulation, twoIdenticalPoses) { // 1. Project two landmarks into two cameras and triangulate Point2 z1 = camera1.project(kLandmark); - vector poses; - Point2Vector measurements; - - poses += kPose1, kPose1; - measurements += z1, z1; + const vector poses{kPose1, kPose1}; + const Point2Vector measurements{z1, z1}; CHECK_EXCEPTION(triangulatePoint3(poses, kSharedCal, measurements), TriangulationUnderconstrainedException); @@ -623,22 +588,19 @@ TEST(triangulation, onePose) { // we expect this test to fail with a TriangulationUnderconstrainedException // because there's only one camera observation - vector poses; - Point2Vector measurements; - - poses += Pose3(); - measurements += Point2(0, 0); + const vector poses{Pose3()}; + const Point2Vector measurements {{0,0}}; CHECK_EXCEPTION(triangulatePoint3(poses, kSharedCal, measurements), TriangulationUnderconstrainedException); } //****************************************************************************** -TEST(triangulation, StereotriangulateNonlinear) { +TEST(triangulation, StereoTriangulateNonlinear) { auto stereoK = boost::make_shared(1733.75, 1733.75, 0, 689.645, 508.835, 0.0699612); - // two camera poses m1, m2 + // two camera poses m1, m2 Matrix4 m1, m2; m1 << 0.796888717, 0.603404026, -0.0295271487, 46.6673779, 0.592783835, -0.77156583, 0.230856632, 66.2186159, 0.116517574, -0.201470143, @@ -648,14 +610,12 @@ TEST(triangulation, StereotriangulateNonlinear) { 0.947083213, 0.131587097, 65.843136, -0.0206094928, 0.131334858, -0.991123524, -4.3525033, 0, 0, 0, 1; - typedef CameraSet Cameras; -
Cameras cameras; - cameras.push_back(StereoCamera(Pose3(m1), stereoK)); - cameras.push_back(StereoCamera(Pose3(m2), stereoK)); + typedef CameraSet StereoCameras; + const StereoCameras cameras{{Pose3(m1), stereoK}, {Pose3(m2), stereoK}}; StereoPoint2Vector measurements; - measurements += StereoPoint2(226.936, 175.212, 424.469); - measurements += StereoPoint2(339.571, 285.547, 669.973); + measurements.push_back(StereoPoint2(226.936, 175.212, 424.469)); + measurements.push_back(StereoPoint2(339.571, 285.547, 669.973)); Point3 initial = Point3(46.0536958, 66.4621179, -6.56285929); // error: 96.5715555191 @@ -741,8 +701,6 @@ TEST(triangulation, StereotriangulateNonlinear) { //****************************************************************************** // Simple test with a well-behaved two camera situation TEST(triangulation, twoPoses_sphericalCamera) { - vector poses; - std::vector measurements; // Project landmark into two cameras and triangulate SphericalCamera cam1(kPose1); @@ -750,8 +708,7 @@ TEST(triangulation, twoPoses_sphericalCamera) { Unit3 u1 = cam1.project(kLandmark); Unit3 u2 = cam2.project(kLandmark); - poses += kPose1, kPose2; - measurements += u1, u2; + std::vector measurements{u1, u2}; CameraSet cameras; cameras.push_back(cam1); @@ -803,9 +760,6 @@ TEST(triangulation, twoPoses_sphericalCamera) { //****************************************************************************** TEST(triangulation, twoPoses_sphericalCamera_extremeFOV) { - vector poses; - std::vector measurements; - // Project landmark into two cameras and triangulate Pose3 poseA = Pose3( Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), @@ -825,8 +779,7 @@ TEST(triangulation, twoPoses_sphericalCamera_extremeFOV) { EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, -1.0)), u2, 1e-7)); // behind and to the right of PoseB - poses += kPose1, kPose2; - measurements += u1, u2; + const std::vector measurements{u1, u2}; CameraSet cameras; cameras.push_back(cam1); diff --git a/gtsam/geometry/tests/testUnit3.cpp 
b/gtsam/geometry/tests/testUnit3.cpp index b548b93153..96a5522fb1 100644 --- a/gtsam/geometry/tests/testUnit3.cpp +++ b/gtsam/geometry/tests/testUnit3.cpp @@ -31,12 +31,9 @@ #include -#include - #include #include -using namespace boost::assign; using namespace std::placeholders; using namespace gtsam; using namespace std; @@ -51,9 +48,8 @@ Point3 point3_(const Unit3& p) { } TEST(Unit3, point3) { - vector ps; - ps += Point3(1, 0, 0), Point3(0, 1, 0), Point3(0, 0, 1), Point3(1, 1, 0) - / sqrt(2.0); + const vector ps{Point3(1, 0, 0), Point3(0, 1, 0), Point3(0, 0, 1), + Point3(1, 1, 0) / sqrt(2.0)}; Matrix actualH, expectedH; for(Point3 p: ps) { Unit3 s(p); diff --git a/gtsam/hybrid/GaussianMixture.cpp b/gtsam/hybrid/GaussianMixture.cpp index fa0cc189b6..cbf3bc376d 100644 --- a/gtsam/hybrid/GaussianMixture.cpp +++ b/gtsam/hybrid/GaussianMixture.cpp @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -33,45 +35,61 @@ GaussianMixture::GaussianMixture( : BaseFactor(CollectKeys(continuousFrontals, continuousParents), discreteParents), BaseConditional(continuousFrontals.size()), - conditionals_(conditionals) {} + conditionals_(conditionals) { + // Calculate logConstant_ as the maximum of the log constants of the + // conditionals, by visiting the decision tree: + logConstant_ = -std::numeric_limits::infinity(); + conditionals_.visit( + [this](const GaussianConditional::shared_ptr &conditional) { + if (conditional) { + this->logConstant_ = std::max( + this->logConstant_, conditional->logNormalizationConstant()); + } + }); +} /* *******************************************************************************/ -const GaussianMixture::Conditionals &GaussianMixture::conditionals() { +const GaussianMixture::Conditionals &GaussianMixture::conditionals() const { return conditionals_; } /* *******************************************************************************/ -GaussianMixture GaussianMixture::FromConditionals( 
+GaussianMixture::GaussianMixture( + KeyVector &&continuousFrontals, KeyVector &&continuousParents, + DiscreteKeys &&discreteParents, + std::vector &&conditionals) + : GaussianMixture(continuousFrontals, continuousParents, discreteParents, + Conditionals(discreteParents, conditionals)) {} + +/* *******************************************************************************/ +GaussianMixture::GaussianMixture( const KeyVector &continuousFrontals, const KeyVector &continuousParents, const DiscreteKeys &discreteParents, - const std::vector &conditionalsList) { - Conditionals dt(discreteParents, conditionalsList); - - return GaussianMixture(continuousFrontals, continuousParents, discreteParents, - dt); -} + const std::vector &conditionals) + : GaussianMixture(continuousFrontals, continuousParents, discreteParents, + Conditionals(discreteParents, conditionals)) {} /* *******************************************************************************/ -GaussianMixture::Sum GaussianMixture::add( - const GaussianMixture::Sum &sum) const { +// TODO(dellaert): This is copy/paste: GaussianMixture should be derived from +// GaussianMixtureFactor, no? +GaussianFactorGraphTree GaussianMixture::add( + const GaussianFactorGraphTree &sum) const { using Y = GaussianFactorGraph; auto add = [](const Y &graph1, const Y &graph2) { auto result = graph1; result.push_back(graph2); return result; }; - const Sum tree = asGaussianFactorGraphTree(); + const auto tree = asGaussianFactorGraphTree(); return sum.empty() ? 
tree : sum.apply(tree, add); } /* *******************************************************************************/ -GaussianMixture::Sum GaussianMixture::asGaussianFactorGraphTree() const { - auto lambda = [](const GaussianFactor::shared_ptr &factor) { - GaussianFactorGraph result; - result.push_back(factor); - return result; +GaussianFactorGraphTree GaussianMixture::asGaussianFactorGraphTree() const { + auto wrap = [](const GaussianConditional::shared_ptr &gc) { + return GaussianFactorGraph{gc}; }; - return {conditionals_, lambda}; + return {conditionals_, wrap}; } /* *******************************************************************************/ @@ -85,8 +103,8 @@ size_t GaussianMixture::nrComponents() const { /* *******************************************************************************/ GaussianConditional::shared_ptr GaussianMixture::operator()( - const DiscreteValues &discreteVals) const { - auto &ptr = conditionals_(discreteVals); + const DiscreteValues &discreteValues) const { + auto &ptr = conditionals_(discreteValues); if (!ptr) return nullptr; auto conditional = boost::dynamic_pointer_cast(ptr); if (conditional) @@ -99,13 +117,25 @@ GaussianConditional::shared_ptr GaussianMixture::operator()( /* *******************************************************************************/ bool GaussianMixture::equals(const HybridFactor &lf, double tol) const { const This *e = dynamic_cast(&lf); - return e != nullptr && BaseFactor::equals(*e, tol); + if (e == nullptr) return false; + + // This will return false if either conditionals_ is empty or e->conditionals_ + // is empty, but not if both are empty or both are not empty: + if (conditionals_.empty() ^ e->conditionals_.empty()) return false; + + // Check the base and the factors: + return BaseFactor::equals(*e, tol) && + conditionals_.equals(e->conditionals_, + [tol](const GaussianConditional::shared_ptr &f1, + const GaussianConditional::shared_ptr &f2) { + return f1->equals(*(f2), tol); + }); } /* 
*******************************************************************************/ void GaussianMixture::print(const std::string &s, const KeyFormatter &formatter) const { - std::cout << s; + std::cout << (s.empty() ? "" : s + "\n"); if (isContinuous()) std::cout << "Continuous "; if (isDiscrete()) std::cout << "Discrete "; if (isHybrid()) std::cout << "Hybrid "; @@ -129,9 +159,68 @@ void GaussianMixture::print(const std::string &s, } /* ************************************************************************* */ -std::set DiscreteKeysAsSet(const DiscreteKeys &dkeys) { +KeyVector GaussianMixture::continuousParents() const { + // Get all parent keys: + const auto range = parents(); + KeyVector continuousParentKeys(range.begin(), range.end()); + // Loop over all discrete keys: + for (const auto &discreteKey : discreteKeys()) { + const Key key = discreteKey.first; + // remove that key from continuousParentKeys: + continuousParentKeys.erase(std::remove(continuousParentKeys.begin(), + continuousParentKeys.end(), key), + continuousParentKeys.end()); + } + return continuousParentKeys; +} + +/* ************************************************************************* */ +bool GaussianMixture::allFrontalsGiven(const VectorValues &given) const { + for (auto &&kv : given) { + if (given.find(kv.first) == given.end()) { + return false; + } + } + return true; +} + +/* ************************************************************************* */ +boost::shared_ptr GaussianMixture::likelihood( + const VectorValues &given) const { + if (!allFrontalsGiven(given)) { + throw std::runtime_error( + "GaussianMixture::likelihood: given values are missing some frontals."); + } + + const DiscreteKeys discreteParentKeys = discreteKeys(); + const KeyVector continuousParentKeys = continuousParents(); + const GaussianMixtureFactor::Factors likelihoods( + conditionals_, [&](const GaussianConditional::shared_ptr &conditional) { + const auto likelihood_m = conditional->likelihood(given); + const 
double Cgm_Kgcm = + logConstant_ - conditional->logNormalizationConstant(); + if (Cgm_Kgcm == 0.0) { + return likelihood_m; + } else { + // Add a constant factor to the likelihood in case the noise models + // are not all equal. + GaussianFactorGraph gfg; + gfg.push_back(likelihood_m); + Vector c(1); + c << std::sqrt(2.0 * Cgm_Kgcm); + auto constantFactor = boost::make_shared(c); + gfg.push_back(constantFactor); + return boost::make_shared(gfg); + } + }); + return boost::make_shared( + continuousParentKeys, discreteParentKeys, likelihoods); +} + +/* ************************************************************************* */ +std::set DiscreteKeysAsSet(const DiscreteKeys &discreteKeys) { std::set s; - s.insert(dkeys.begin(), dkeys.end()); + s.insert(discreteKeys.begin(), discreteKeys.end()); return s; } @@ -156,7 +245,7 @@ GaussianMixture::prunerFunc(const DecisionTreeFactor &decisionTree) { const GaussianConditional::shared_ptr &conditional) -> GaussianConditional::shared_ptr { // typecast so we can use this to get probability value - DiscreteValues values(choices); + const DiscreteValues values(choices); // Case where the gaussian mixture has the same // discrete keys as the decision tree. @@ -179,7 +268,7 @@ GaussianMixture::prunerFunc(const DecisionTreeFactor &decisionTree) { DiscreteValues::CartesianProduct(set_diff); for (const DiscreteValues &assignment : assignments) { DiscreteValues augmented_values(values); - augmented_values.insert(assignment.begin(), assignment.end()); + augmented_values.insert(assignment); // If any one of the sub-branches are non-zero, // we need this conditional. 
@@ -207,4 +296,53 @@ void GaussianMixture::prune(const DecisionTreeFactor &decisionTree) { conditionals_.root_ = pruned_conditionals.root_; } +/* *******************************************************************************/ +AlgebraicDecisionTree GaussianMixture::logProbability( + const VectorValues &continuousValues) const { + // functor to calculate (double) logProbability value from + // GaussianConditional. + auto probFunc = + [continuousValues](const GaussianConditional::shared_ptr &conditional) { + if (conditional) { + return conditional->logProbability(continuousValues); + } else { + // Return arbitrarily small logProbability if conditional is null + // Conditional is null if it is pruned out. + return -1e20; + } + }; + return DecisionTree(conditionals_, probFunc); +} + +/* *******************************************************************************/ +AlgebraicDecisionTree GaussianMixture::error( + const VectorValues &continuousValues) const { + auto errorFunc = [&](const GaussianConditional::shared_ptr &conditional) { + return conditional->error(continuousValues) + // + logConstant_ - conditional->logNormalizationConstant(); + }; + DecisionTree errorTree(conditionals_, errorFunc); + return errorTree; +} + +/* *******************************************************************************/ +double GaussianMixture::error(const HybridValues &values) const { + // Directly index to get the conditional, no need to build the whole tree. 
+ auto conditional = conditionals_(values.discrete()); + return conditional->error(values.continuous()) + // + logConstant_ - conditional->logNormalizationConstant(); +} + +/* *******************************************************************************/ +double GaussianMixture::logProbability(const HybridValues &values) const { + auto conditional = conditionals_(values.discrete()); + return conditional->logProbability(values.continuous()); +} + +/* *******************************************************************************/ +double GaussianMixture::evaluate(const HybridValues &values) const { + auto conditional = conditionals_(values.discrete()); + return conditional->evaluate(values.continuous()); +} + } // namespace gtsam diff --git a/gtsam/hybrid/GaussianMixture.h b/gtsam/hybrid/GaussianMixture.h index dada35ae4b..eef0419a24 100644 --- a/gtsam/hybrid/GaussianMixture.h +++ b/gtsam/hybrid/GaussianMixture.h @@ -23,12 +23,15 @@ #include #include #include +#include #include #include #include namespace gtsam { +class HybridValues; + /** * @brief A conditional of gaussian mixtures indexed by discrete variables, as * part of a Bayes Network. This is the result of the elimination of a @@ -56,19 +59,17 @@ class GTSAM_EXPORT GaussianMixture using BaseFactor = HybridFactor; using BaseConditional = Conditional; - /// Alias for DecisionTree of GaussianFactorGraphs - using Sum = DecisionTree; - /// typedef for Decision Tree of Gaussian Conditionals using Conditionals = DecisionTree; private: - Conditionals conditionals_; + Conditionals conditionals_; ///< a decision tree of Gaussian conditionals. + double logConstant_; ///< log of the normalization constant. /** * @brief Convert a DecisionTree of factors into a DT of Gaussian FGs. */ - Sum asGaussianFactorGraphTree() const; + GaussianFactorGraphTree asGaussianFactorGraphTree() const; /** * @brief Helper function to get the pruner functor. 
@@ -85,7 +86,7 @@ class GTSAM_EXPORT GaussianMixture /// @name Constructors /// @{ - /// Defaut constructor, mainly for serialization. + /// Default constructor, mainly for serialization. GaussianMixture() = default; /** @@ -112,21 +113,23 @@ * @param discreteParents Discrete parents variables * @param conditionals List of conditionals */ - static This FromConditionals( + GaussianMixture(KeyVector &&continuousFrontals, KeyVector &&continuousParents, + DiscreteKeys &&discreteParents, + std::vector &&conditionals); + + /** + * @brief Make a Gaussian Mixture from a list of Gaussian conditionals + * + * @param continuousFrontals The continuous frontal variables + * @param continuousParents The continuous parent variables + * @param discreteParents Discrete parents variables + * @param conditionals List of conditionals + */ + GaussianMixture( const KeyVector &continuousFrontals, const KeyVector &continuousParents, const DiscreteKeys &discreteParents, const std::vector &conditionals); - /// @} - /// @name Standard API - /// @{ - - GaussianConditional::shared_ptr operator()( - const DiscreteValues &discreteVals) const; - - /// Returns the total number of continuous components - size_t nrComponents() const; - /// @} /// @name Testable /// @{ @@ -140,9 +143,94 @@ const KeyFormatter &formatter = DefaultKeyFormatter) const override; /// @} + /// @name Standard API + /// @{ + + /// @brief Return the conditional Gaussian for the given discrete assignment. + GaussianConditional::shared_ptr operator()( + const DiscreteValues &discreteValues) const; + + /// Returns the total number of continuous components + size_t nrComponents() const; + + /// Returns the continuous keys among the parents. + KeyVector continuousParents() const; + + /// The log normalization constant is max of the individual + /// log-normalization constants. 
+ double logNormalizationConstant() const override { return logConstant_; } + + /** + * Create a likelihood factor for a Gaussian mixture, return a Mixture factor + * on the parents. + */ + boost::shared_ptr likelihood( + const VectorValues &given) const; /// Getter for the underlying Conditionals DecisionTree - const Conditionals &conditionals(); + const Conditionals &conditionals() const; + + /** + * @brief Compute logProbability of the GaussianMixture as a tree. + * + * @param continuousValues The continuous VectorValues. + * @return AlgebraicDecisionTree A decision tree with the same keys + * as the conditionals, and leaf values as the logProbability. + */ + AlgebraicDecisionTree logProbability( + const VectorValues &continuousValues) const; + + /** + * @brief Compute the error of this Gaussian Mixture. + * + * This requires some care, as different mixture components may have + * different normalization constants. Let's consider p(x|y,m), where m is + * discrete. We need the error to satisfy the invariant: + * + * error(x;y,m) = K - log(probability(x;y,m)) + * + * For all x,y,m. But note that K, the (log) normalization constant defined + * in Conditional.h, should not depend on x, y, or m, only on the parameters + * of the density. Hence, we delegate to the underlying Gaussian + * conditionals, indexed by m, which do satisfy: + * + * log(probability_m(x;y)) = K_m - error_m(x;y) + * + * We resolve by having K == max(K_m) and + * + * error(x;y,m) = error_m(x;y) + K - K_m + * + * which also makes error(x;y,m) >= 0 for all x,y,m. + * + * @param values Continuous values and discrete assignment. + * @return double + */ + double error(const HybridValues &values) const override; + + /** + * @brief Compute error of the GaussianMixture as a tree. + * + * @param continuousValues The continuous VectorValues. + * @return AlgebraicDecisionTree A decision tree on the discrete keys + * only, with the leaf values as the error for each assignment. 
+ */ + AlgebraicDecisionTree error(const VectorValues &continuousValues) const; + + /** + * @brief Compute the logProbability of this Gaussian Mixture. + * + * @param values Continuous values and discrete assignment. + * @return double + */ + double logProbability(const HybridValues &values) const override; + + /// Calculate probability density for given `values`. + double evaluate(const HybridValues &values) const override; + + /// Evaluate probability density, sugar. + double operator()(const HybridValues &values) const { + return evaluate(values); + } /** * @brief Prune the decision tree of Gaussian factors as per the discrete @@ -158,13 +246,27 @@ class GTSAM_EXPORT GaussianMixture * maintaining the decision tree structure. * * @param sum Decision Tree of Gaussian Factor Graphs - * @return Sum + * @return GaussianFactorGraphTree */ - Sum add(const Sum &sum) const; + GaussianFactorGraphTree add(const GaussianFactorGraphTree &sum) const; + /// @} + + private: + /// Check whether `given` has values for all frontal keys. + bool allFrontalsGiven(const VectorValues &given) const; + + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(Archive &ar, const unsigned int /*version*/) { + ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseFactor); + ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseConditional); + ar &BOOST_SERIALIZATION_NVP(conditionals_); + } }; /// Return the DiscreteKey vector as a set. 
-std::set DiscreteKeysAsSet(const DiscreteKeys &dkeys); +std::set DiscreteKeysAsSet(const DiscreteKeys &discreteKeys); // traits template <> diff --git a/gtsam/hybrid/GaussianMixtureFactor.cpp b/gtsam/hybrid/GaussianMixtureFactor.cpp index 181b1e6a5a..0c7ff0e87e 100644 --- a/gtsam/hybrid/GaussianMixtureFactor.cpp +++ b/gtsam/hybrid/GaussianMixtureFactor.cpp @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include namespace gtsam { @@ -35,16 +37,18 @@ GaussianMixtureFactor::GaussianMixtureFactor(const KeyVector &continuousKeys, /* *******************************************************************************/ bool GaussianMixtureFactor::equals(const HybridFactor &lf, double tol) const { const This *e = dynamic_cast(&lf); - return e != nullptr && Base::equals(*e, tol); -} + if (e == nullptr) return false; -/* *******************************************************************************/ -GaussianMixtureFactor GaussianMixtureFactor::FromFactors( - const KeyVector &continuousKeys, const DiscreteKeys &discreteKeys, - const std::vector &factors) { - Factors dt(discreteKeys, factors); + // This will return false if either factors_ is empty or e->factors_ is empty, + // but not if both are empty or both are not empty: + if (factors_.empty() ^ e->factors_.empty()) return false; - return GaussianMixtureFactor(continuousKeys, discreteKeys, dt); + // Check the base and the factors: + return Base::equals(*e, tol) && + factors_.equals(e->factors_, + [tol](const sharedFactor &f1, const sharedFactor &f2) { + return f1->equals(*f2, tol); + }); } /* *******************************************************************************/ @@ -52,47 +56,67 @@ void GaussianMixtureFactor::print(const std::string &s, const KeyFormatter &formatter) const { HybridFactor::print(s, formatter); std::cout << "{\n"; - factors_.print( - "", [&](Key k) { return formatter(k); }, - [&](const GaussianFactor::shared_ptr &gf) -> std::string { - RedirectCout rd; - std::cout << ":\n"; - if 
(gf && !gf->empty()) { - gf->print("", formatter); - return rd.str(); - } else { - return "nullptr"; - } - }); + if (factors_.empty()) { + std::cout << " empty" << std::endl; + } else { + factors_.print( + "", [&](Key k) { return formatter(k); }, + [&](const sharedFactor &gf) -> std::string { + RedirectCout rd; + std::cout << ":\n"; + if (gf && !gf->empty()) { + gf->print("", formatter); + return rd.str(); + } else { + return "nullptr"; + } + }); + } std::cout << "}" << std::endl; } /* *******************************************************************************/ -const GaussianMixtureFactor::Factors &GaussianMixtureFactor::factors() { - return factors_; +GaussianMixtureFactor::sharedFactor GaussianMixtureFactor::operator()( + const DiscreteValues &assignment) const { + return factors_(assignment); } /* *******************************************************************************/ -GaussianMixtureFactor::Sum GaussianMixtureFactor::add( - const GaussianMixtureFactor::Sum &sum) const { +GaussianFactorGraphTree GaussianMixtureFactor::add( + const GaussianFactorGraphTree &sum) const { using Y = GaussianFactorGraph; auto add = [](const Y &graph1, const Y &graph2) { auto result = graph1; result.push_back(graph2); return result; }; - const Sum tree = asGaussianFactorGraphTree(); + const auto tree = asGaussianFactorGraphTree(); return sum.empty() ? 
tree : sum.apply(tree, add); } /* *******************************************************************************/ -GaussianMixtureFactor::Sum GaussianMixtureFactor::asGaussianFactorGraphTree() +GaussianFactorGraphTree GaussianMixtureFactor::asGaussianFactorGraphTree() const { - auto wrap = [](const GaussianFactor::shared_ptr &factor) { - GaussianFactorGraph result; - result.push_back(factor); - return result; - }; + auto wrap = [](const sharedFactor &gf) { return GaussianFactorGraph{gf}; }; return {factors_, wrap}; } + +/* *******************************************************************************/ +AlgebraicDecisionTree GaussianMixtureFactor::error( + const VectorValues &continuousValues) const { + // functor to convert from sharedFactor to double error value. + auto errorFunc = [&continuousValues](const sharedFactor &gf) { + return gf->error(continuousValues); + }; + DecisionTree errorTree(factors_, errorFunc); + return errorTree; +} + +/* *******************************************************************************/ +double GaussianMixtureFactor::error(const HybridValues &values) const { + const sharedFactor gf = factors_(values.discrete()); + return gf->error(values.continuous()); +} +/* *******************************************************************************/ + } // namespace gtsam diff --git a/gtsam/hybrid/GaussianMixtureFactor.h b/gtsam/hybrid/GaussianMixtureFactor.h index 0d649f8576..aa8f2a1996 100644 --- a/gtsam/hybrid/GaussianMixtureFactor.h +++ b/gtsam/hybrid/GaussianMixtureFactor.h @@ -20,16 +20,18 @@ #pragma once +#include #include #include -#include +#include #include +#include namespace gtsam { -class GaussianFactorGraph; - -using GaussianFactorVector = std::vector; +class HybridValues; +class DiscreteValues; +class VectorValues; /** * @brief Implementation of a discrete conditional mixture factor. 
@@ -37,7 +39,7 @@ using GaussianFactorVector = std::vector; * serves to "select" a mixture component corresponding to a GaussianFactor type * of measurement. * - * Represents the underlying Gaussian Mixture as a Decision Tree, where the set + * Represents the underlying Gaussian mixture as a Decision Tree, where the set * of discrete variables indexes to the continuous gaussian distribution. * * @ingroup hybrid @@ -48,10 +50,10 @@ class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor { using This = GaussianMixtureFactor; using shared_ptr = boost::shared_ptr; - using Sum = DecisionTree; + using sharedFactor = boost::shared_ptr; - /// typedef for Decision Tree of Gaussian Factors - using Factors = DecisionTree; + /// typedef for Decision Tree of Gaussian factors and log-constant. + using Factors = DecisionTree; private: /// Decision tree of Gaussian factors indexed by discrete keys. @@ -61,9 +63,9 @@ class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor { * @brief Helper function to return factors and functional to create a * DecisionTree of Gaussian Factor Graphs. * - * @return Sum (DecisionTree) + * @return GaussianFactorGraphTree */ - Sum asGaussianFactorGraphTree() const; + GaussianFactorGraphTree asGaussianFactorGraphTree() const; public: /// @name Constructors @@ -73,12 +75,12 @@ class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor { GaussianMixtureFactor() = default; /** - * @brief Construct a new Gaussian Mixture Factor object. + * @brief Construct a new Gaussian mixture factor. * * @param continuousKeys A vector of keys representing continuous variables. * @param discreteKeys A vector of keys representing discrete variables and * their cardinalities. - * @param factors The decision tree of Gaussian Factors stored as the mixture + * @param factors The decision tree of Gaussian factors stored as the mixture * density. 
*/ GaussianMixtureFactor(const KeyVector &continuousKeys, @@ -89,19 +91,16 @@ class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor { * @brief Construct a new GaussianMixtureFactor object using a vector of * GaussianFactor shared pointers. * - * @param keys Vector of keys for continuous factors. + * @param continuousKeys Vector of keys for continuous factors. * @param discreteKeys Vector of discrete keys. * @param factors Vector of gaussian factor shared pointers. */ - GaussianMixtureFactor(const KeyVector &keys, const DiscreteKeys &discreteKeys, - const std::vector &factors) - : GaussianMixtureFactor(keys, discreteKeys, + GaussianMixtureFactor(const KeyVector &continuousKeys, + const DiscreteKeys &discreteKeys, + const std::vector &factors) + : GaussianMixtureFactor(continuousKeys, discreteKeys, Factors(discreteKeys, factors)) {} - static This FromFactors( - const KeyVector &continuousKeys, const DiscreteKeys &discreteKeys, - const std::vector &factors); - /// @} /// @name Testable /// @{ @@ -111,10 +110,13 @@ class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor { void print( const std::string &s = "GaussianMixtureFactor\n", const KeyFormatter &formatter = DefaultKeyFormatter) const override; + /// @} + /// @name Standard API + /// @{ - /// Getter for the underlying Gaussian Factor Decision Tree. - const Factors &factors(); + /// Get factor at a given discrete assignment. + sharedFactor operator()(const DiscreteValues &assignment) const; /** * @brief Combine the Gaussian Factor Graphs in `sum` and `this` while @@ -124,13 +126,39 @@ class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor { * variables. * @return Sum */ - Sum add(const Sum &sum) const; + GaussianFactorGraphTree add(const GaussianFactorGraphTree &sum) const; + + /** + * @brief Compute error of the GaussianMixtureFactor as a tree. + * + * @param continuousValues The continuous VectorValues. 
+ * @return AlgebraicDecisionTree A decision tree with the same keys + * as the factors involved, and leaf values as the error. + */ + AlgebraicDecisionTree error(const VectorValues &continuousValues) const; + + /** + * @brief Compute the error of this GaussianMixtureFactor for the given + * continuous values and discrete assignment. + * @return double + */ + double error(const HybridValues &values) const override; /// Add MixtureFactor to a Sum, syntactic sugar. - friend Sum &operator+=(Sum &sum, const GaussianMixtureFactor &factor) { + friend GaussianFactorGraphTree &operator+=( + GaussianFactorGraphTree &sum, const GaussianMixtureFactor &factor) { sum = factor.add(sum); return sum; } + /// @} + + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE &ar, const unsigned int /*version*/) { + ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); + ar &BOOST_SERIALIZATION_NVP(factors_); + } }; // traits diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp index a641363846..7c9023e582 100644 --- a/gtsam/hybrid/HybridBayesNet.cpp +++ b/gtsam/hybrid/HybridBayesNet.cpp @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- - * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * GTSAM Copyright 2010-2022, Georgia Tech Research Corporation, * Atlanta, Georgia 30332-0415 * All Rights Reserved * Authors: Frank Dellaert, et al. (see THANKS for the full author list) @@ -8,10 +8,11 @@ /** * @file HybridBayesNet.cpp - * @brief A bayes net of Gaussian Conditionals indexed by discrete keys. 
* @author Fan Jiang * @author Varun Agrawal * @author Shangjie Xue + * @author Frank Dellaert * @date January 2022 */ @@ -20,21 +21,34 @@ #include #include +// In Wrappers we have no access to this so have a default ready +static std::mt19937_64 kRandomNumberGenerator(42); + namespace gtsam { +/* ************************************************************************* */ +void HybridBayesNet::print(const std::string &s, + const KeyFormatter &formatter) const { + Base::print(s, formatter); +} + +/* ************************************************************************* */ +bool HybridBayesNet::equals(const This &bn, double tol) const { + return Base::equals(bn, tol); +} + /* ************************************************************************* */ DecisionTreeFactor::shared_ptr HybridBayesNet::discreteConditionals() const { AlgebraicDecisionTree decisionTree; - // The canonical decision tree factor which will get the discrete conditionals - // added to it. + // The canonical decision tree factor which will get + // the discrete conditionals added to it. DecisionTreeFactor dtFactor; - for (size_t i = 0; i < this->size(); i++) { - HybridConditional::shared_ptr conditional = this->at(i); + for (auto &&conditional : *this) { if (conditional->isDiscrete()) { // Convert to a DecisionTreeFactor and add it to the main factor. - DecisionTreeFactor f(*conditional->asDiscreteConditional()); + DecisionTreeFactor f(*conditional->asDiscrete()); dtFactor = dtFactor * f; } } @@ -45,52 +59,84 @@ DecisionTreeFactor::shared_ptr HybridBayesNet::discreteConditionals() const { /** * @brief Helper function to get the pruner functional. * - * @param decisionTree The probability decision tree of only discrete keys. - * @return std::function &, const GaussianConditional::shared_ptr &)> + * @param prunedDecisionTree The prob. decision tree of only discrete keys. + * @param conditional Conditional to prune. Used to get full assignment. 
+ * @return std::function &, double)> */ std::function &, double)> prunerFunc( - const DecisionTreeFactor &decisionTree, + const DecisionTreeFactor &prunedDecisionTree, const HybridConditional &conditional) { // Get the discrete keys as sets for the decision tree - // and the gaussian mixture. - auto decisionTreeKeySet = DiscreteKeysAsSet(decisionTree.discreteKeys()); - auto conditionalKeySet = DiscreteKeysAsSet(conditional.discreteKeys()); + // and the Gaussian mixture. + std::set decisionTreeKeySet = + DiscreteKeysAsSet(prunedDecisionTree.discreteKeys()); + std::set conditionalKeySet = + DiscreteKeysAsSet(conditional.discreteKeys()); - auto pruner = [decisionTree, decisionTreeKeySet, conditionalKeySet]( + auto pruner = [prunedDecisionTree, decisionTreeKeySet, conditionalKeySet]( const Assignment &choices, double probability) -> double { + // This corresponds to 0 probability + double pruned_prob = 0.0; + // typecast so we can use this to get probability value DiscreteValues values(choices); - // Case where the gaussian mixture has the same + // Case where the Gaussian mixture has the same // discrete keys as the decision tree. if (conditionalKeySet == decisionTreeKeySet) { - if (decisionTree(values) == 0) { - return 0.0; + if (prunedDecisionTree(values) == 0) { + return pruned_prob; } else { return probability; } } else { + // Due to branch merging (aka pruning) in DecisionTree, it is possible we + // get a `values` which doesn't have the full set of keys. 
+ std::set valuesKeys; + for (auto kvp : values) { + valuesKeys.insert(kvp.first); + } + std::set conditionalKeys; + for (auto kvp : conditionalKeySet) { + conditionalKeys.insert(kvp.first); + } + // If true, then values is missing some keys + if (conditionalKeys != valuesKeys) { + // Get the keys present in conditionalKeys but not in valuesKeys + std::vector missing_keys; + std::set_difference(conditionalKeys.begin(), conditionalKeys.end(), + valuesKeys.begin(), valuesKeys.end(), + std::back_inserter(missing_keys)); + // Insert missing keys with a default assignment. + for (auto missing_key : missing_keys) { + values[missing_key] = 0; + } + } + + // Now we generate the full assignment by enumerating + // over all keys in the prunedDecisionTree. + // First we find the differing keys std::vector set_diff; std::set_difference(decisionTreeKeySet.begin(), decisionTreeKeySet.end(), conditionalKeySet.begin(), conditionalKeySet.end(), std::back_inserter(set_diff)); + // Now enumerate over all assignments of the differing keys const std::vector assignments = DiscreteValues::CartesianProduct(set_diff); for (const DiscreteValues &assignment : assignments) { DiscreteValues augmented_values(values); - augmented_values.insert(assignment.begin(), assignment.end()); + augmented_values.insert(assignment); // If any one of the sub-branches are non-zero, // we need this probability. - if (decisionTree(augmented_values) > 0.0) { + if (prunedDecisionTree(augmented_values) > 0.0) { return probability; } } // If we are here, it means that all the sub-branches are 0, // so we prune. 
- return 0.0; + return pruned_prob; } }; return pruner; @@ -98,24 +144,24 @@ std::function &, double)> prunerFunc( /* ************************************************************************* */ void HybridBayesNet::updateDiscreteConditionals( - const DecisionTreeFactor::shared_ptr &prunedDecisionTree) { - KeyVector prunedTreeKeys = prunedDecisionTree->keys(); + const DecisionTreeFactor &prunedDecisionTree) { + KeyVector prunedTreeKeys = prunedDecisionTree.keys(); + // Loop with index since we need it later. for (size_t i = 0; i < this->size(); i++) { HybridConditional::shared_ptr conditional = this->at(i); if (conditional->isDiscrete()) { - // std::cout << demangle(typeid(conditional).name()) << std::endl; - auto discrete = conditional->asDiscreteConditional(); - KeyVector frontals(discrete->frontals().begin(), - discrete->frontals().end()); + auto discrete = conditional->asDiscrete(); // Apply prunerFunc to the underlying AlgebraicDecisionTree auto discreteTree = boost::dynamic_pointer_cast(discrete); DecisionTreeFactor::ADT prunedDiscreteTree = - discreteTree->apply(prunerFunc(*prunedDecisionTree, *conditional)); + discreteTree->apply(prunerFunc(prunedDecisionTree, *conditional)); // Create the new (hybrid) conditional + KeyVector frontals(discrete->frontals().begin(), + discrete->frontals().end()); auto prunedDiscrete = boost::make_shared( frontals.size(), conditional->discreteKeys(), prunedDiscreteTree); conditional = boost::make_shared(prunedDiscrete); @@ -130,9 +176,7 @@ void HybridBayesNet::updateDiscreteConditionals( HybridBayesNet HybridBayesNet::prune(size_t maxNrLeaves) { // Get the decision tree of only the discrete keys auto discreteConditionals = this->discreteConditionals(); - const DecisionTreeFactor::shared_ptr decisionTree = - boost::make_shared( - discreteConditionals->prune(maxNrLeaves)); + const auto decisionTree = discreteConditionals->prune(maxNrLeaves); this->updateDiscreteConditionals(decisionTree); @@ -147,20 +191,14 @@ HybridBayesNet 
HybridBayesNet::prune(size_t maxNrLeaves) { // Go through all the conditionals in the // Bayes Net and prune them as per decisionTree. - for (size_t i = 0; i < this->size(); i++) { - HybridConditional::shared_ptr conditional = this->at(i); - - if (conditional->isHybrid()) { - GaussianMixture::shared_ptr gaussianMixture = conditional->asMixture(); - - // Make a copy of the gaussian mixture and prune it! - auto prunedGaussianMixture = - boost::make_shared(*gaussianMixture); - prunedGaussianMixture->prune(*decisionTree); + for (auto &&conditional : *this) { + if (auto gm = conditional->asMixture()) { + // Make a copy of the Gaussian mixture and prune it! + auto prunedGaussianMixture = boost::make_shared(*gm); + prunedGaussianMixture->prune(decisionTree); // imperative :-( // Type-erase and add to the pruned Bayes Net fragment. - prunedBayesNetFragment.push_back( - boost::make_shared(prunedGaussianMixture)); + prunedBayesNetFragment.push_back(prunedGaussianMixture); } else { // Add the non-GaussianMixture conditional @@ -171,37 +209,19 @@ HybridBayesNet HybridBayesNet::prune(size_t maxNrLeaves) { return prunedBayesNetFragment; } -/* ************************************************************************* */ -GaussianMixture::shared_ptr HybridBayesNet::atMixture(size_t i) const { - return factors_.at(i)->asMixture(); -} - -/* ************************************************************************* */ -GaussianConditional::shared_ptr HybridBayesNet::atGaussian(size_t i) const { - return factors_.at(i)->asGaussian(); -} - -/* ************************************************************************* */ -DiscreteConditional::shared_ptr HybridBayesNet::atDiscrete(size_t i) const { - return factors_.at(i)->asDiscreteConditional(); -} - /* ************************************************************************* */ GaussianBayesNet HybridBayesNet::choose( const DiscreteValues &assignment) const { GaussianBayesNet gbn; - for (size_t idx = 0; idx < size(); idx++) { - if 
(factors_.at(idx)->isHybrid()) { - // If factor is hybrid, select based on assignment. - GaussianMixture gm = *this->atMixture(idx); - gbn.push_back(gm(assignment)); - - } else if (factors_.at(idx)->isContinuous()) { - // If continuous only, add gaussian conditional. - gbn.push_back((this->atGaussian(idx))); - - } else if (factors_.at(idx)->isDiscrete()) { - // If factor at `idx` is discrete-only, we simply continue. + for (auto &&conditional : *this) { + if (auto gm = conditional->asMixture()) { + // If conditional is hybrid, select based on assignment. + gbn.push_back((*gm)(assignment)); + } else if (auto gc = conditional->asGaussian()) { + // If continuous only, add Gaussian conditional. + gbn.push_back(gc); + } else if (auto dc = conditional->asDiscrete()) { + // If conditional is discrete-only, we simply continue. continue; } } @@ -211,25 +231,133 @@ GaussianBayesNet HybridBayesNet::choose( /* ************************************************************************* */ HybridValues HybridBayesNet::optimize() const { - // Solve for the MPE + // Collect all the discrete factors to compute MPE DiscreteBayesNet discrete_bn; - for (auto &conditional : factors_) { + for (auto &&conditional : *this) { if (conditional->isDiscrete()) { - discrete_bn.push_back(conditional->asDiscreteConditional()); + discrete_bn.push_back(conditional->asDiscrete()); } } + // Solve for the MPE DiscreteValues mpe = DiscreteFactorGraph(discrete_bn).optimize(); // Given the MPE, compute the optimal continuous values. 
- GaussianBayesNet gbn = this->choose(mpe); - return HybridValues(mpe, gbn.optimize()); + return HybridValues(optimize(mpe), mpe); } /* ************************************************************************* */ VectorValues HybridBayesNet::optimize(const DiscreteValues &assignment) const { - GaussianBayesNet gbn = this->choose(assignment); + GaussianBayesNet gbn = choose(assignment); + + // Check if there exists a nullptr in the GaussianBayesNet + // If yes, return an empty VectorValues + if (std::find(gbn.begin(), gbn.end(), nullptr) != gbn.end()) { + return VectorValues(); + } return gbn.optimize(); } +/* ************************************************************************* */ +HybridValues HybridBayesNet::sample(const HybridValues &given, + std::mt19937_64 *rng) const { + DiscreteBayesNet dbn; + for (auto &&conditional : *this) { + if (conditional->isDiscrete()) { + // If conditional is discrete-only, we add to the discrete Bayes net. + dbn.push_back(conditional->asDiscrete()); + } + } + // Sample a discrete assignment. + const DiscreteValues assignment = dbn.sample(given.discrete()); + // Select the continuous Bayes net corresponding to the assignment. + GaussianBayesNet gbn = choose(assignment); + // Sample from the Gaussian Bayes net. 
+ VectorValues sample = gbn.sample(given.continuous(), rng); + return {sample, assignment}; +} + +/* ************************************************************************* */ +HybridValues HybridBayesNet::sample(std::mt19937_64 *rng) const { + HybridValues given; + return sample(given, rng); +} + +/* ************************************************************************* */ +HybridValues HybridBayesNet::sample(const HybridValues &given) const { + return sample(given, &kRandomNumberGenerator); +} + +/* ************************************************************************* */ +HybridValues HybridBayesNet::sample() const { + return sample(&kRandomNumberGenerator); +} + +/* ************************************************************************* */ +AlgebraicDecisionTree HybridBayesNet::logProbability( + const VectorValues &continuousValues) const { + AlgebraicDecisionTree result(0.0); + + // Iterate over each conditional. + for (auto &&conditional : *this) { + if (auto gm = conditional->asMixture()) { + // If conditional is hybrid, select based on assignment and compute + // logProbability. + result = result + gm->logProbability(continuousValues); + } else if (auto gc = conditional->asGaussian()) { + // If continuous, get the (double) logProbability and add it to the + // result + double logProbability = gc->logProbability(continuousValues); + // Add the computed logProbability to every leaf of the logProbability + // tree. 
+ result = result.apply([logProbability](double leaf_value) { + return leaf_value + logProbability; + }); + } else if (auto dc = conditional->asDiscrete()) { + // If discrete, add the discrete logProbability in the right branch + result = result.apply( + [dc](const Assignment &assignment, double leaf_value) { + return leaf_value + dc->logProbability(DiscreteValues(assignment)); + }); + } + } + + return result; +} + +/* ************************************************************************* */ +AlgebraicDecisionTree HybridBayesNet::evaluate( + const VectorValues &continuousValues) const { + AlgebraicDecisionTree tree = this->logProbability(continuousValues); + return tree.apply([](double log) { return exp(log); }); +} + +/* ************************************************************************* */ +double HybridBayesNet::evaluate(const HybridValues &values) const { + return exp(logProbability(values)); +} + +/* ************************************************************************* */ +HybridGaussianFactorGraph HybridBayesNet::toFactorGraph( + const VectorValues &measurements) const { + HybridGaussianFactorGraph fg; + + // For all nodes in the Bayes net, if its frontal variable is in measurements, + // replace it by a likelihood factor: + for (auto &&conditional : *this) { + if (conditional->frontalsIn(measurements)) { + if (auto gc = conditional->asGaussian()) { + fg.push_back(gc->likelihood(measurements)); + } else if (auto gm = conditional->asMixture()) { + fg.push_back(gm->likelihood(measurements)); + } else { + throw std::runtime_error("Unknown conditional type"); + } + } else { + fg.push_back(conditional); + } + } + return fg; +} + } // namespace gtsam diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h index 84c1da6ea8..46a2b4f771 100644 --- a/gtsam/hybrid/HybridBayesNet.h +++ b/gtsam/hybrid/HybridBayesNet.h @@ -8,7 +8,7 @@ /** * @file HybridBayesNet.h - * @brief A bayes net of Gaussian Conditionals indexed by discrete keys. 
+ * @brief A Bayes net of Gaussian Conditionals indexed by discrete keys. * @author Varun Agrawal * @author Fan Jiang * @author Frank Dellaert @@ -43,48 +43,63 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { /// @name Standard Constructors /// @{ - /** Construct empty bayes net */ + /** Construct empty Bayes net */ HybridBayesNet() = default; /// @} /// @name Testable /// @{ - /** Check equality */ - bool equals(const This &bn, double tol = 1e-9) const { - return Base::equals(bn, tol); - } + /// GTSAM-style printing + void print(const std::string &s = "", const KeyFormatter &formatter = + DefaultKeyFormatter) const override; - /// print graph - void print( - const std::string &s = "", - const KeyFormatter &formatter = DefaultKeyFormatter) const override { - Base::print(s, formatter); - } + /// GTSAM-style equals + bool equals(const This &fg, double tol = 1e-9) const; /// @} /// @name Standard Interface /// @{ - /// Add HybridConditional to Bayes Net - using Base::add; - - /// Add a discrete conditional to the Bayes Net. - void add(const DiscreteKey &key, const std::string &table) { - push_back( - HybridConditional(boost::make_shared(key, table))); + /** + * @brief Add a hybrid conditional using a shared_ptr. + * + * This is the "native" push back, as this class stores hybrid conditionals. + */ + void push_back(boost::shared_ptr conditional) { + factors_.push_back(conditional); } - using Base::push_back; - - /// Get a specific Gaussian mixture by index `i`. - GaussianMixture::shared_ptr atMixture(size_t i) const; - - /// Get a specific Gaussian conditional by index `i`. - GaussianConditional::shared_ptr atGaussian(size_t i) const; + /** + * Preferred: add a conditional directly using a pointer. 
+   *
+   * Examples:
+   *  hbn.emplace_back(new GaussianMixture(...));
+   *  hbn.emplace_back(new GaussianConditional(...));
+   *  hbn.emplace_back(new DiscreteConditional(...));
+   */
+  template 
+  void emplace_back(Conditional *conditional) {
+    factors_.push_back(boost::make_shared(
+        boost::shared_ptr(conditional)));
+  }
 
-  /// Get a specific discrete conditional by index `i`.
-  DiscreteConditional::shared_ptr atDiscrete(size_t i) const;
+  /**
+   * Add a conditional using a shared_ptr, using implicit conversion to
+   * a HybridConditional.
+   *
+   * This is useful when you create a conditional shared pointer as you need it
+   * somewhere else.
+   *
+   * Example:
+   *   auto shared_ptr_to_a_conditional =
+   *     boost::make_shared(...);
+   *   hbn.push_back(shared_ptr_to_a_conditional);
+   */
+  void push_back(HybridConditional &&conditional) {
+    factors_.push_back(
+        boost::make_shared(std::move(conditional)));
+  }
 
   /**
    * @brief Get the Gaussian Bayes Net which corresponds to a specific discrete
@@ -95,6 +110,14 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet {
    */
   GaussianBayesNet choose(const DiscreteValues &assignment) const;
 
+  /// Evaluate hybrid probability density for given HybridValues.
+  double evaluate(const HybridValues &values) const;
+
+  /// Evaluate hybrid probability density for given HybridValues, sugar.
+  double operator()(const HybridValues &values) const {
+    return evaluate(values);
+  }
+
   /**
    * @brief Solve the HybridBayesNet by first computing the MPE of all the
    * discrete variables and then optimizing the continuous variables based on
@@ -120,10 +143,81 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet {
    */
   DecisionTreeFactor::shared_ptr discreteConditionals() const;
 
-  public:
+  /**
+   * @brief Sample from an incomplete BayesNet, given missing variables.
+   *
+   * Example:
+   *   std::mt19937_64 rng(42);
+   *   VectorValues given = ...;
+   *   auto sample = bn.sample(given, &rng);
+   *
+   * @param given Values of missing variables.
+   * @param rng The pseudo-random number generator.
+   * @return HybridValues
+   */
+  HybridValues sample(const HybridValues &given, std::mt19937_64 *rng) const;
+
+  /**
+   * @brief Sample using ancestral sampling.
+   *
+   * Example:
+   *   std::mt19937_64 rng(42);
+   *   auto sample = bn.sample(&rng);
+   *
+   * @param rng The pseudo-random number generator.
+   * @return HybridValues
+   */
+  HybridValues sample(std::mt19937_64 *rng) const;
+
+  /**
+   * @brief Sample from an incomplete BayesNet, use default rng.
+   *
+   * @param given Values of missing variables.
+   * @return HybridValues
+   */
+  HybridValues sample(const HybridValues &given) const;
+
+  /**
+   * @brief Sample using ancestral sampling, use default rng.
+   *
+   * @return HybridValues
+   */
+  HybridValues sample() const;
+
   /// Prune the Hybrid Bayes Net such that we have at most maxNrLeaves leaves.
   HybridBayesNet prune(size_t maxNrLeaves);
 
+  /**
+   * @brief Compute the log probability for each discrete assignment,
+   * and return as a tree.
+   *
+   * @param continuousValues Continuous values at which to compute the log probability.
+   * @return AlgebraicDecisionTree
+   */
+  AlgebraicDecisionTree logProbability(
+      const VectorValues &continuousValues) const;
+
+  using BayesNet::logProbability;  // expose HybridValues version
+
+  /**
+   * @brief Compute unnormalized probability q(μ|M),
+   * for each discrete assignment, and return as a tree.
+   * q(μ|M) is the unnormalized probability at the MLE point μ,
+   * conditioned on the discrete variables.
+   *
+   * @param continuousValues Continuous values at which to compute the
+   * probability.
+   * @return AlgebraicDecisionTree
+   */
+  AlgebraicDecisionTree evaluate(
+      const VectorValues &continuousValues) const;
+
+  /**
+   * Convert a hybrid Bayes net to a hybrid Gaussian factor graph by converting
+   * all conditionals with instantiated measurements into likelihood factors.
+ */ + HybridGaussianFactorGraph toFactorGraph( + const VectorValues &measurements) const; /// @} private: @@ -132,8 +226,7 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { * * @param prunedDecisionTree */ - void updateDiscreteConditionals( - const DecisionTreeFactor::shared_ptr &prunedDecisionTree); + void updateDiscreteConditionals(const DecisionTreeFactor &prunedDecisionTree); /** Serialization function */ friend class boost::serialization::access; diff --git a/gtsam/hybrid/HybridBayesTree.cpp b/gtsam/hybrid/HybridBayesTree.cpp index 8fdedab44f..df2367cb5c 100644 --- a/gtsam/hybrid/HybridBayesTree.cpp +++ b/gtsam/hybrid/HybridBayesTree.cpp @@ -14,7 +14,7 @@ * @brief Hybrid Bayes Tree, the result of eliminating a * HybridJunctionTree * @date Mar 11, 2022 - * @author Fan Jiang + * @author Fan Jiang, Varun Agrawal */ #include @@ -49,7 +49,7 @@ HybridValues HybridBayesTree::optimize() const { // The root should be discrete only, we compute the MPE if (root_conditional->isDiscrete()) { - dbn.push_back(root_conditional->asDiscreteConditional()); + dbn.push_back(root_conditional->asDiscrete()); mpe = DiscreteFactorGraph(dbn).optimize(); } else { throw std::runtime_error( @@ -58,7 +58,7 @@ HybridValues HybridBayesTree::optimize() const { } VectorValues values = optimize(mpe); - return HybridValues(mpe, values); + return HybridValues(values, mpe); } /* ************************************************************************* */ @@ -73,6 +73,8 @@ struct HybridAssignmentData { GaussianBayesTree::sharedNode parentClique_; // The gaussian bayes tree that will be recursively created. GaussianBayesTree* gaussianbayesTree_; + // Flag indicating if all the nodes are valid. Used in optimize(). + bool valid_; /** * @brief Construct a new Hybrid Assignment Data object. 
@@ -83,10 +85,13 @@ struct HybridAssignmentData { */ HybridAssignmentData(const DiscreteValues& assignment, const GaussianBayesTree::sharedNode& parentClique, - GaussianBayesTree* gbt) + GaussianBayesTree* gbt, bool valid = true) : assignment_(assignment), parentClique_(parentClique), - gaussianbayesTree_(gbt) {} + gaussianbayesTree_(gbt), + valid_(valid) {} + + bool isValid() const { return valid_; } /** * @brief A function used during tree traversal that operates on each node @@ -101,6 +106,7 @@ struct HybridAssignmentData { HybridAssignmentData& parentData) { // Extract the gaussian conditional from the Hybrid clique HybridConditional::shared_ptr hybrid_conditional = node->conditional(); + GaussianConditional::shared_ptr conditional; if (hybrid_conditional->isHybrid()) { conditional = (*hybrid_conditional->asMixture())(parentData.assignment_); @@ -111,22 +117,29 @@ struct HybridAssignmentData { conditional = boost::make_shared(); } - // Create the GaussianClique for the current node - auto clique = boost::make_shared(conditional); - // Add the current clique to the GaussianBayesTree. - parentData.gaussianbayesTree_->addClique(clique, parentData.parentClique_); + GaussianBayesTree::sharedNode clique; + if (conditional) { + // Create the GaussianClique for the current node + clique = boost::make_shared(conditional); + // Add the current clique to the GaussianBayesTree. 
+ parentData.gaussianbayesTree_->addClique(clique, + parentData.parentClique_); + } else { + parentData.valid_ = false; + } // Create new HybridAssignmentData where the current node is the parent // This will be passed down to the children nodes HybridAssignmentData data(parentData.assignment_, clique, - parentData.gaussianbayesTree_); + parentData.gaussianbayesTree_, parentData.valid_); return data; } }; /* ************************************************************************* */ -VectorValues HybridBayesTree::optimize(const DiscreteValues& assignment) const { +GaussianBayesTree HybridBayesTree::choose( + const DiscreteValues& assignment) const { GaussianBayesTree gbt; HybridAssignmentData rootData(assignment, 0, &gbt); { @@ -138,6 +151,20 @@ VectorValues HybridBayesTree::optimize(const DiscreteValues& assignment) const { visitorPost); } + if (!rootData.isValid()) { + return GaussianBayesTree(); + } + return gbt; +} + +/* ************************************************************************* + */ +VectorValues HybridBayesTree::optimize(const DiscreteValues& assignment) const { + GaussianBayesTree gbt = this->choose(assignment); + // If empty GaussianBayesTree, means a clique is pruned hence invalid + if (gbt.size() == 0) { + return VectorValues(); + } VectorValues result = gbt.optimize(); // Return the optimized bayes net result. 
@@ -147,7 +174,7 @@ VectorValues HybridBayesTree::optimize(const DiscreteValues& assignment) const { /* ************************************************************************* */ void HybridBayesTree::prune(const size_t maxNrLeaves) { auto decisionTree = - this->roots_.at(0)->conditional()->asDiscreteConditional(); + this->roots_.at(0)->conditional()->asDiscrete(); DecisionTreeFactor prunedDecisionTree = decisionTree->prune(maxNrLeaves); decisionTree->root_ = prunedDecisionTree.root_; diff --git a/gtsam/hybrid/HybridBayesTree.h b/gtsam/hybrid/HybridBayesTree.h index 8af0af9683..7240edaac7 100644 --- a/gtsam/hybrid/HybridBayesTree.h +++ b/gtsam/hybrid/HybridBayesTree.h @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -50,9 +51,12 @@ class GTSAM_EXPORT HybridBayesTreeClique typedef boost::shared_ptr shared_ptr; typedef boost::weak_ptr weak_ptr; HybridBayesTreeClique() {} - virtual ~HybridBayesTreeClique() {} HybridBayesTreeClique(const boost::shared_ptr& conditional) : Base(conditional) {} + ///< Copy constructor + HybridBayesTreeClique(const HybridBayesTreeClique& clique) : Base(clique) {} + + virtual ~HybridBayesTreeClique() {} }; /* ************************************************************************* */ @@ -73,6 +77,15 @@ class GTSAM_EXPORT HybridBayesTree : public BayesTree { /** Check equality */ bool equals(const This& other, double tol = 1e-9) const; + /** + * @brief Get the Gaussian Bayes Tree which corresponds to a specific discrete + * value assignment. + * + * @param assignment The discrete value assignment for the discrete keys. 
+ * @return GaussianBayesTree + */ + GaussianBayesTree choose(const DiscreteValues& assignment) const; + /** * @brief Optimize the hybrid Bayes tree by computing the MPE for the current * set of discrete variables and using it to compute the best continuous @@ -119,18 +132,15 @@ struct traits : public Testable {}; * This object stores parent keys in our base type factor so that * eliminating those parent keys will pull this subtree into the * elimination. - * This does special stuff for the hybrid case. * - * @tparam CLIQUE + * This is a template instantiation for hybrid Bayes tree cliques, storing both + * the regular keys *and* discrete keys in the HybridConditional. */ -template -class BayesTreeOrphanWrapper< - CLIQUE, typename std::enable_if< - boost::is_same::value> > - : public CLIQUE::ConditionalType { +template <> +class BayesTreeOrphanWrapper : public HybridConditional { public: - typedef CLIQUE CliqueType; - typedef typename CLIQUE::ConditionalType Base; + typedef HybridBayesTreeClique CliqueType; + typedef HybridConditional Base; boost::shared_ptr clique; diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp index 8e071532dd..24f61a85f4 100644 --- a/gtsam/hybrid/HybridConditional.cpp +++ b/gtsam/hybrid/HybridConditional.cpp @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -38,7 +39,7 @@ HybridConditional::HybridConditional(const KeyVector &continuousFrontals, /* ************************************************************************ */ HybridConditional::HybridConditional( - boost::shared_ptr continuousConditional) + const boost::shared_ptr &continuousConditional) : HybridConditional(continuousConditional->keys(), {}, continuousConditional->nrFrontals()) { inner_ = continuousConditional; @@ -46,7 +47,7 @@ HybridConditional::HybridConditional( /* ************************************************************************ */ HybridConditional::HybridConditional( - boost::shared_ptr discreteConditional) + const 
boost::shared_ptr &discreteConditional) : HybridConditional({}, discreteConditional->discreteKeys(), discreteConditional->nrFrontals()) { inner_ = discreteConditional; @@ -54,7 +55,7 @@ HybridConditional::HybridConditional( /* ************************************************************************ */ HybridConditional::HybridConditional( - boost::shared_ptr gaussianMixture) + const boost::shared_ptr &gaussianMixture) : BaseFactor(KeyVector(gaussianMixture->keys().begin(), gaussianMixture->keys().begin() + gaussianMixture->nrContinuous()), @@ -102,7 +103,72 @@ void HybridConditional::print(const std::string &s, /* ************************************************************************ */ bool HybridConditional::equals(const HybridFactor &other, double tol) const { const This *e = dynamic_cast(&other); - return e != nullptr && BaseFactor::equals(*e, tol); + if (e == nullptr) return false; + if (auto gm = asMixture()) { + auto other = e->asMixture(); + return other != nullptr && gm->equals(*other, tol); + } + if (auto gc = asGaussian()) { + auto other = e->asGaussian(); + return other != nullptr && gc->equals(*other, tol); + } + if (auto dc = asDiscrete()) { + auto other = e->asDiscrete(); + return other != nullptr && dc->equals(*other, tol); + } + + return inner_ ? (e->inner_ ? 
inner_->equals(*(e->inner_), tol) : false) + : !(e->inner_); +} + +/* ************************************************************************ */ +double HybridConditional::error(const HybridValues &values) const { + if (auto gc = asGaussian()) { + return gc->error(values.continuous()); + } + if (auto gm = asMixture()) { + return gm->error(values); + } + if (auto dc = asDiscrete()) { + return dc->error(values.discrete()); + } + throw std::runtime_error( + "HybridConditional::error: conditional type not handled"); +} + +/* ************************************************************************ */ +double HybridConditional::logProbability(const HybridValues &values) const { + if (auto gc = asGaussian()) { + return gc->logProbability(values.continuous()); + } + if (auto gm = asMixture()) { + return gm->logProbability(values); + } + if (auto dc = asDiscrete()) { + return dc->logProbability(values.discrete()); + } + throw std::runtime_error( + "HybridConditional::logProbability: conditional type not handled"); +} + +/* ************************************************************************ */ +double HybridConditional::logNormalizationConstant() const { + if (auto gc = asGaussian()) { + return gc->logNormalizationConstant(); + } + if (auto gm = asMixture()) { + return gm->logNormalizationConstant(); // 0.0! + } + if (auto dc = asDiscrete()) { + return dc->logNormalizationConstant(); // 0.0! 
+  }
+  throw std::runtime_error(
+      "HybridConditional::logNormalizationConstant: conditional type not handled");
+}
+
+/* ************************************************************************ */
+double HybridConditional::evaluate(const HybridValues &values) const {
+  return std::exp(logProbability(values));
+}
 
 }  // namespace gtsam
diff --git a/gtsam/hybrid/HybridConditional.h b/gtsam/hybrid/HybridConditional.h
index 050f10290e..c8cb968dff 100644
--- a/gtsam/hybrid/HybridConditional.h
+++ b/gtsam/hybrid/HybridConditional.h
@@ -52,7 +52,7 @@ namespace gtsam {
  * having diamond inheritances, and neutralized the need to change other
  * components of GTSAM to make hybrid elimination work.
  *
- * A great reference to the type-erasure pattern is Eduaado Madrid's CppCon
+ * A great reference to the type-erasure pattern is Eduardo Madrid's CppCon
  * talk (https://www.youtube.com/watch?v=s082Qmd_nHs).
  *
  * @ingroup hybrid
@@ -111,7 +111,7 @@ class GTSAM_EXPORT HybridConditional
    * HybridConditional.
    */
   HybridConditional(
-      boost::shared_ptr continuousConditional);
+      const boost::shared_ptr& continuousConditional);
 
   /**
    * @brief Construct a new Hybrid Conditional object
@@ -119,7 +119,8 @@ class GTSAM_EXPORT HybridConditional
    * @param discreteConditional Conditional used to create the
    * HybridConditional.
    */
-  HybridConditional(boost::shared_ptr discreteConditional);
+  HybridConditional(
+      const boost::shared_ptr& discreteConditional);
 
   /**
    * @brief Construct a new Hybrid Conditional object
@@ -127,56 +128,81 @@ class GTSAM_EXPORT HybridConditional
    * @param gaussianMixture Gaussian Mixture Conditional used to create the
    * HybridConditional.
*/ - HybridConditional(boost::shared_ptr gaussianMixture); + HybridConditional(const boost::shared_ptr& gaussianMixture); + + /// @} + /// @name Testable + /// @{ + + /// GTSAM-style print + void print( + const std::string& s = "Hybrid Conditional: ", + const KeyFormatter& formatter = DefaultKeyFormatter) const override; + + /// GTSAM-style equals + bool equals(const HybridFactor& other, double tol = 1e-9) const override; + + /// @} + /// @name Standard Interface + /// @{ /** * @brief Return HybridConditional as a GaussianMixture - * - * @return GaussianMixture::shared_ptr + * @return nullptr if not a mixture + * @return GaussianMixture::shared_ptr otherwise */ - GaussianMixture::shared_ptr asMixture() { - if (!isHybrid()) throw std::invalid_argument("Not a mixture"); - return boost::static_pointer_cast(inner_); + GaussianMixture::shared_ptr asMixture() const { + return boost::dynamic_pointer_cast(inner_); } /** * @brief Return HybridConditional as a GaussianConditional - * - * @return GaussianConditional::shared_ptr + * @return nullptr if not a GaussianConditional + * @return GaussianConditional::shared_ptr otherwise */ - GaussianConditional::shared_ptr asGaussian() { - if (!isContinuous()) - throw std::invalid_argument("Not a continuous conditional"); - return boost::static_pointer_cast(inner_); + GaussianConditional::shared_ptr asGaussian() const { + return boost::dynamic_pointer_cast(inner_); } /** * @brief Return conditional as a DiscreteConditional - * + * @return nullptr if not a DiscreteConditional * @return DiscreteConditional::shared_ptr */ - DiscreteConditional::shared_ptr asDiscreteConditional() { - if (!isDiscrete()) - throw std::invalid_argument("Not a discrete conditional"); - return boost::static_pointer_cast(inner_); + DiscreteConditional::shared_ptr asDiscrete() const { + return boost::dynamic_pointer_cast(inner_); } - /// @} - /// @name Testable - /// @{ + /// Get the type-erased pointer to the inner type + boost::shared_ptr inner() const { 
return inner_; } - /// GTSAM-style print - void print( - const std::string& s = "Hybrid Conditional: ", - const KeyFormatter& formatter = DefaultKeyFormatter) const override; + /// Return the error of the underlying conditional. + double error(const HybridValues& values) const override; - /// GTSAM-style equals - bool equals(const HybridFactor& other, double tol = 1e-9) const override; + /// Return the log-probability (or density) of the underlying conditional. + double logProbability(const HybridValues& values) const override; - /// @} + /** + * Return the log normalization constant. + * Note this is 0.0 for discrete and hybrid conditionals, but depends + * on the continuous parameters for Gaussian conditionals. + */ + double logNormalizationConstant() const override; + + /// Return the probability (or density) of the underlying conditional. + double evaluate(const HybridValues& values) const override; + + /// Check if VectorValues `measurements` contains all frontal keys. + bool frontalsIn(const VectorValues& measurements) const { + for (Key key : frontals()) { + if (!measurements.exists(key)) { + return false; + } + } + return true; + } - /// Get the type-erased pointer to the inner type - boost::shared_ptr inner() { return inner_; } + /// @} private: /** Serialization function */ @@ -185,6 +211,20 @@ class GTSAM_EXPORT HybridConditional void serialize(Archive& ar, const unsigned int /*version*/) { ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseFactor); ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseConditional); + ar& BOOST_SERIALIZATION_NVP(inner_); + + // register the various casts based on the type of inner_ + // https://www.boost.org/doc/libs/1_80_0/libs/serialization/doc/serialization.html#runtimecasting + if (isDiscrete()) { + boost::serialization::void_cast_register( + static_cast(NULL), static_cast(NULL)); + } else if (isContinuous()) { + boost::serialization::void_cast_register( + static_cast(NULL), static_cast(NULL)); + } else { + 
boost::serialization::void_cast_register( + static_cast(NULL), static_cast(NULL)); + } } }; // HybridConditional diff --git a/gtsam/hybrid/HybridDiscreteFactor.cpp b/gtsam/hybrid/HybridDiscreteFactor.cpp deleted file mode 100644 index 0455e1e904..0000000000 --- a/gtsam/hybrid/HybridDiscreteFactor.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file HybridDiscreteFactor.cpp - * @brief Wrapper for a discrete factor - * @date Mar 11, 2022 - * @author Fan Jiang - */ - -#include - -#include - -#include "gtsam/discrete/DecisionTreeFactor.h" - -namespace gtsam { - -/* ************************************************************************ */ -// TODO(fan): THIS IS VERY VERY DIRTY! We need to get DiscreteFactor right! -HybridDiscreteFactor::HybridDiscreteFactor(DiscreteFactor::shared_ptr other) - : Base(boost::dynamic_pointer_cast(other) - ->discreteKeys()), - inner_(other) {} - -/* ************************************************************************ */ -HybridDiscreteFactor::HybridDiscreteFactor(DecisionTreeFactor &&dtf) - : Base(dtf.discreteKeys()), - inner_(boost::make_shared(std::move(dtf))) {} - -/* ************************************************************************ */ -bool HybridDiscreteFactor::equals(const HybridFactor &lf, double tol) const { - const This *e = dynamic_cast(&lf); - // TODO(Varun) How to compare inner_ when they are abstract types? 
- return e != nullptr && Base::equals(*e, tol); -} - -/* ************************************************************************ */ -void HybridDiscreteFactor::print(const std::string &s, - const KeyFormatter &formatter) const { - HybridFactor::print(s, formatter); - inner_->print("\n", formatter); -}; - -} // namespace gtsam diff --git a/gtsam/hybrid/HybridDiscreteFactor.h b/gtsam/hybrid/HybridDiscreteFactor.h deleted file mode 100644 index 015dc46f8c..0000000000 --- a/gtsam/hybrid/HybridDiscreteFactor.h +++ /dev/null @@ -1,71 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file HybridDiscreteFactor.h - * @date Mar 11, 2022 - * @author Fan Jiang - * @author Varun Agrawal - */ - -#pragma once - -#include -#include -#include - -namespace gtsam { - -/** - * A HybridDiscreteFactor is a thin container for DiscreteFactor, which allows - * us to hide the implementation of DiscreteFactor and thus avoid diamond - * inheritance. 
- * - * @ingroup hybrid - */ -class GTSAM_EXPORT HybridDiscreteFactor : public HybridFactor { - private: - DiscreteFactor::shared_ptr inner_; - - public: - using Base = HybridFactor; - using This = HybridDiscreteFactor; - using shared_ptr = boost::shared_ptr; - - /// @name Constructors - /// @{ - - // Implicit conversion from a shared ptr of DF - HybridDiscreteFactor(DiscreteFactor::shared_ptr other); - - // Forwarding constructor from concrete DecisionTreeFactor - HybridDiscreteFactor(DecisionTreeFactor &&dtf); - - /// @} - /// @name Testable - /// @{ - virtual bool equals(const HybridFactor &lf, double tol) const override; - - void print( - const std::string &s = "HybridFactor\n", - const KeyFormatter &formatter = DefaultKeyFormatter) const override; - - /// @} - - /// Return pointer to the internal discrete factor - DiscreteFactor::shared_ptr inner() const { return inner_; } -}; - -// traits -template <> -struct traits : public Testable {}; - -} // namespace gtsam diff --git a/gtsam/hybrid/HybridEliminationTree.h b/gtsam/hybrid/HybridEliminationTree.h index b2dd1bd9c8..941fefa5a5 100644 --- a/gtsam/hybrid/HybridEliminationTree.h +++ b/gtsam/hybrid/HybridEliminationTree.h @@ -24,7 +24,7 @@ namespace gtsam { /** - * Elimination Tree type for Hybrid + * Elimination Tree type for Hybrid Factor Graphs. * * @ingroup hybrid */ diff --git a/gtsam/hybrid/HybridFactor.cpp b/gtsam/hybrid/HybridFactor.cpp index 1216fd9224..b25e97f051 100644 --- a/gtsam/hybrid/HybridFactor.cpp +++ b/gtsam/hybrid/HybridFactor.cpp @@ -81,7 +81,7 @@ bool HybridFactor::equals(const HybridFactor &lf, double tol) const { /* ************************************************************************ */ void HybridFactor::print(const std::string &s, const KeyFormatter &formatter) const { - std::cout << s; + std::cout << (s.empty() ? 
"" : s + "\n"); if (isContinuous_) std::cout << "Continuous "; if (isDiscrete_) std::cout << "Discrete "; if (isHybrid_) std::cout << "Hybrid "; diff --git a/gtsam/hybrid/HybridFactor.h b/gtsam/hybrid/HybridFactor.h index e0cae55c1e..0fa352191b 100644 --- a/gtsam/hybrid/HybridFactor.h +++ b/gtsam/hybrid/HybridFactor.h @@ -18,14 +18,21 @@ #pragma once #include +#include #include #include +#include #include #include #include namespace gtsam { +class HybridValues; + +/// Alias for DecisionTree of GaussianFactorGraphs +using GaussianFactorGraphTree = DecisionTree; + KeyVector CollectKeys(const KeyVector &continuousKeys, const DiscreteKeys &discreteKeys); KeyVector CollectKeys(const KeyVector &keys1, const KeyVector &keys2); @@ -33,11 +40,10 @@ DiscreteKeys CollectDiscreteKeys(const DiscreteKeys &key1, const DiscreteKeys &key2); /** - * Base class for hybrid probabilistic factors + * Base class for *truly* hybrid probabilistic factors * * Examples: - * - HybridGaussianFactor - * - HybridDiscreteFactor + * - MixtureFactor * - GaussianMixtureFactor * - GaussianMixture * diff --git a/gtsam/hybrid/HybridFactorGraph.cpp b/gtsam/hybrid/HybridFactorGraph.cpp new file mode 100644 index 0000000000..7fb2801162 --- /dev/null +++ b/gtsam/hybrid/HybridFactorGraph.cpp @@ -0,0 +1,79 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2022, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridFactorGraph.cpp + * @brief Factor graph with utilities for hybrid factors. 
+ * @author Varun Agrawal + * @author Frank Dellaert + * @date January, 2023 + */ + +#include +#include + +#include + +namespace gtsam { + +/* ************************************************************************* */ +std::set HybridFactorGraph::discreteKeys() const { + std::set keys; + for (auto& factor : factors_) { + if (auto p = boost::dynamic_pointer_cast(factor)) { + for (const DiscreteKey& key : p->discreteKeys()) { + keys.insert(key); + } + } + if (auto p = boost::dynamic_pointer_cast(factor)) { + for (const DiscreteKey& key : p->discreteKeys()) { + keys.insert(key); + } + } + } + return keys; +} + +/* ************************************************************************* */ +KeySet HybridFactorGraph::discreteKeySet() const { + KeySet keys; + std::set key_set = discreteKeys(); + std::transform(key_set.begin(), key_set.end(), + std::inserter(keys, keys.begin()), + [](const DiscreteKey& k) { return k.first; }); + return keys; +} + +/* ************************************************************************* */ +std::unordered_map HybridFactorGraph::discreteKeyMap() const { + std::unordered_map result; + for (const DiscreteKey& k : discreteKeys()) { + result[k.first] = k; + } + return result; +} + +/* ************************************************************************* */ +const KeySet HybridFactorGraph::continuousKeySet() const { + KeySet keys; + for (auto& factor : factors_) { + if (auto p = boost::dynamic_pointer_cast(factor)) { + for (const Key& key : p->continuousKeys()) { + keys.insert(key); + } + } + } + return keys; +} + +/* ************************************************************************* */ + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridFactorGraph.h b/gtsam/hybrid/HybridFactorGraph.h index 05a17b000f..a02d4a2129 100644 --- a/gtsam/hybrid/HybridFactorGraph.h +++ b/gtsam/hybrid/HybridFactorGraph.h @@ -11,50 +11,40 @@ /** * @file HybridFactorGraph.h - * @brief Hybrid factor graph base class that uses type erasure + * @brief 
Factor graph with utilities for hybrid factors. * @author Varun Agrawal + * @author Frank Dellaert * @date May 28, 2022 */ #pragma once -#include -#include #include #include -#include #include +#include + namespace gtsam { +class DiscreteFactor; +class Ordering; + using SharedFactor = boost::shared_ptr; /** * Hybrid Factor Graph - * ----------------------- - * This is the base hybrid factor graph. - * Everything inside needs to be hybrid factor or hybrid conditional. + * Factor graph with utilities for hybrid factors. */ -class HybridFactorGraph : public FactorGraph { +class HybridFactorGraph : public FactorGraph { public: - using Base = FactorGraph; + using Base = FactorGraph; using This = HybridFactorGraph; ///< this class using shared_ptr = boost::shared_ptr; ///< shared_ptr to This using Values = gtsam::Values; ///< backwards compatibility using Indices = KeyVector; ///> map from keys to values - protected: - /// Check if FACTOR type is derived from DiscreteFactor. - template - using IsDiscrete = typename std::enable_if< - std::is_base_of::value>::type; - - /// Check if FACTOR type is derived from HybridFactor. - template - using IsHybrid = typename std::enable_if< - std::is_base_of::value>::type; - public: /// @name Constructors /// @{ @@ -71,92 +61,22 @@ class HybridFactorGraph : public FactorGraph { HybridFactorGraph(const FactorGraph& graph) : Base(graph) {} /// @} + /// @name Extra methods to inspect discrete/continuous keys. 
+ /// @{ - // Allow use of selected FactorGraph methods: - using Base::empty; - using Base::reserve; - using Base::size; - using Base::operator[]; - using Base::add; - using Base::push_back; - using Base::resize; - - /** - * Add a discrete factor *pointer* to the internal discrete graph - * @param discreteFactor - boost::shared_ptr to the factor to add - */ - template - IsDiscrete push_discrete( - const boost::shared_ptr& discreteFactor) { - Base::push_back(boost::make_shared(discreteFactor)); - } - - /** - * Add a discrete-continuous (Hybrid) factor *pointer* to the graph - * @param hybridFactor - boost::shared_ptr to the factor to add - */ - template - IsHybrid push_hybrid(const boost::shared_ptr& hybridFactor) { - Base::push_back(hybridFactor); - } - - /// delete emplace_shared. - template - void emplace_shared(Args&&... args) = delete; - - /// Construct a factor and add (shared pointer to it) to factor graph. - template - IsDiscrete emplace_discrete(Args&&... args) { - auto factor = boost::allocate_shared( - Eigen::aligned_allocator(), std::forward(args)...); - push_discrete(factor); - } - - /// Construct a factor and add (shared pointer to it) to factor graph. - template - IsHybrid emplace_hybrid(Args&&... args) { - auto factor = boost::allocate_shared( - Eigen::aligned_allocator(), std::forward(args)...); - push_hybrid(factor); - } + /// Get all the discrete keys in the factor graph. + std::set discreteKeys() const; - /** - * @brief Add a single factor shared pointer to the hybrid factor graph. - * Dynamically handles the factor type and assigns it to the correct - * underlying container. - * - * @param sharedFactor The factor to add to this factor graph. - */ - void push_back(const SharedFactor& sharedFactor) { - if (auto p = boost::dynamic_pointer_cast(sharedFactor)) { - push_discrete(p); - } - if (auto p = boost::dynamic_pointer_cast(sharedFactor)) { - push_hybrid(p); - } - } + /// Get all the discrete keys in the factor graph, as a set. 
+ KeySet discreteKeySet() const; - /// Get all the discrete keys in the factor graph. - const KeySet discreteKeys() const { - KeySet discrete_keys; - for (auto& factor : factors_) { - for (const DiscreteKey& k : factor->discreteKeys()) { - discrete_keys.insert(k.first); - } - } - return discrete_keys; - } + /// Get a map from Key to corresponding DiscreteKey. + std::unordered_map discreteKeyMap() const; /// Get all the continuous keys in the factor graph. - const KeySet continuousKeys() const { - KeySet keys; - for (auto& factor : factors_) { - for (const Key& key : factor->continuousKeys()) { - keys.insert(key); - } - } - return keys; - } + const KeySet continuousKeySet() const; + + /// @} }; } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp deleted file mode 100644 index 4d7490e493..0000000000 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ /dev/null @@ -1,47 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file HybridGaussianFactor.cpp - * @date Mar 11, 2022 - * @author Fan Jiang - */ - -#include - -#include - -namespace gtsam { - -/* ************************************************************************* */ -HybridGaussianFactor::HybridGaussianFactor(GaussianFactor::shared_ptr other) - : Base(other->keys()), inner_(other) {} - -/* ************************************************************************* */ -HybridGaussianFactor::HybridGaussianFactor(JacobianFactor &&jf) - : Base(jf.keys()), - inner_(boost::make_shared(std::move(jf))) {} - -/* ************************************************************************* */ -bool HybridGaussianFactor::equals(const HybridFactor &other, double tol) const { - const This *e = dynamic_cast(&other); - // TODO(Varun) How to compare inner_ when they are abstract types? - return e != nullptr && Base::equals(*e, tol); -} - -/* ************************************************************************* */ -void HybridGaussianFactor::print(const std::string &s, - const KeyFormatter &formatter) const { - HybridFactor::print(s, formatter); - inner_->print("\n", formatter); -}; - -} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h deleted file mode 100644 index 6ca62921c1..0000000000 --- a/gtsam/hybrid/HybridGaussianFactor.h +++ /dev/null @@ -1,71 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file HybridGaussianFactor.h - * @date Mar 11, 2022 - * @author Fan Jiang - */ - -#pragma once - -#include -#include -#include - -namespace gtsam { - -/** - * A HybridGaussianFactor is a layer over GaussianFactor so that we do not have - * a diamond inheritance i.e. an extra factor type that inherits from both - * HybridFactor and GaussianFactor. - * - * @ingroup hybrid - */ -class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { - private: - GaussianFactor::shared_ptr inner_; - - public: - using Base = HybridFactor; - using This = HybridGaussianFactor; - using shared_ptr = boost::shared_ptr; - - HybridGaussianFactor() = default; - - // Explicit conversion from a shared ptr of GF - explicit HybridGaussianFactor(GaussianFactor::shared_ptr other); - - // Forwarding constructor from concrete JacobianFactor - explicit HybridGaussianFactor(JacobianFactor &&jf); - - public: - /// @name Testable - /// @{ - - /// Check equality. - virtual bool equals(const HybridFactor &lf, double tol) const override; - - /// GTSAM print utility. 
- void print( - const std::string &s = "HybridGaussianFactor\n", - const KeyFormatter &formatter = DefaultKeyFormatter) const override; - - /// @} - - GaussianFactor::shared_ptr inner() const { return inner_; } -}; - -// traits -template <> -struct traits : public Testable {}; - -} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 041603fbd5..c912a74fc7 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -26,10 +26,8 @@ #include #include #include -#include #include #include -#include #include #include #include @@ -47,224 +45,263 @@ #include #include #include -#include #include #include +// #define HYBRID_TIMING + namespace gtsam { +/// Specialize EliminateableFactorGraph for HybridGaussianFactorGraph: template class EliminateableFactorGraph; +using OrphanWrapper = BayesTreeOrphanWrapper; + +using boost::dynamic_pointer_cast; + +/* ************************************************************************ */ +// Throw a runtime exception for method specified in string s, and factor f: +static void throwRuntimeError(const std::string &s, + const boost::shared_ptr &f) { + auto &fr = *f; + throw std::runtime_error(s + " not implemented for factor type " + + demangle(typeid(fr).name()) + "."); +} + /* ************************************************************************ */ -static GaussianMixtureFactor::Sum &addGaussian( - GaussianMixtureFactor::Sum &sum, const GaussianFactor::shared_ptr &factor) { - using Y = GaussianFactorGraph; - // If the decision tree is not intiialized, then intialize it. 
- if (sum.empty()) { - GaussianFactorGraph result; - result.push_back(factor); - sum = GaussianMixtureFactor::Sum(result); +const Ordering HybridOrdering(const HybridGaussianFactorGraph &graph) { + KeySet discrete_keys = graph.discreteKeySet(); + const VariableIndex index(graph); + return Ordering::ColamdConstrainedLast( + index, KeyVector(discrete_keys.begin(), discrete_keys.end()), true); +} +/* ************************************************************************ */ +static GaussianFactorGraphTree addGaussian( + const GaussianFactorGraphTree &gfgTree, + const GaussianFactor::shared_ptr &factor) { + // If the decision tree is not initialized, then initialize it. + if (gfgTree.empty()) { + GaussianFactorGraph result{factor}; + return GaussianFactorGraphTree(result); } else { - auto add = [&factor](const Y &graph) { + auto add = [&factor](const GaussianFactorGraph &graph) { auto result = graph; result.push_back(factor); return result; }; - sum = sum.apply(add); + return gfgTree.apply(add); } - return sum; } /* ************************************************************************ */ -GaussianMixtureFactor::Sum sumFrontals( - const HybridGaussianFactorGraph &factors) { - // sum out frontals, this is the factor on the separator - gttic(sum); - - GaussianMixtureFactor::Sum sum; - std::vector deferredFactors; - - for (auto &f : factors) { - if (f->isHybrid()) { - if (auto cgmf = boost::dynamic_pointer_cast(f)) { - sum = cgmf->add(sum); - } - - if (auto gm = boost::dynamic_pointer_cast(f)) { - sum = gm->asMixture()->add(sum); - } - - } else if (f->isContinuous()) { - if (auto gf = boost::dynamic_pointer_cast(f)) { - deferredFactors.push_back(gf->inner()); - } - if (auto cg = boost::dynamic_pointer_cast(f)) { - deferredFactors.push_back(cg->asGaussian()); +// TODO(dellaert): it's probably more efficient to first collect the discrete +// keys, and then loop over all assignments to populate a vector. 
+GaussianFactorGraphTree HybridGaussianFactorGraph::assembleGraphTree() const { + gttic(assembleGraphTree); + + GaussianFactorGraphTree result; + + for (auto &f : factors_) { + // TODO(dellaert): just use a virtual method defined in HybridFactor. + if (auto gf = dynamic_pointer_cast(f)) { + result = addGaussian(result, gf); + } else if (auto gm = dynamic_pointer_cast(f)) { + result = gm->add(result); + } else if (auto hc = dynamic_pointer_cast(f)) { + if (auto gm = hc->asMixture()) { + result = gm->add(result); + } else if (auto g = hc->asGaussian()) { + result = addGaussian(result, g); + } else { + // Has to be discrete. + // TODO(dellaert): in C++20, we can use std::visit. + continue; } - - } else if (f->isDiscrete()) { + } else if (dynamic_pointer_cast(f)) { // Don't do anything for discrete-only factors // since we want to eliminate continuous values only. continue; - } else { - // We need to handle the case where the object is actually an - // BayesTreeOrphanWrapper! - auto orphan = boost::dynamic_pointer_cast< - BayesTreeOrphanWrapper>(f); - if (!orphan) { - auto &fr = *f; - throw std::invalid_argument( - std::string("factor is discrete in continuous elimination ") + - demangle(typeid(fr).name())); - } + // TODO(dellaert): there was an unattributed comment here: We need to + // handle the case where the object is actually an BayesTreeOrphanWrapper! 
+ throwRuntimeError("gtsam::assembleGraphTree", f); } } - for (auto &f : deferredFactors) { - sum = addGaussian(sum, f); - } - - gttoc(sum); + gttoc(assembleGraphTree); - return sum; + return result; } /* ************************************************************************ */ -std::pair +static std::pair> continuousElimination(const HybridGaussianFactorGraph &factors, const Ordering &frontalKeys) { GaussianFactorGraph gfg; - for (auto &fp : factors) { - if (auto ptr = boost::dynamic_pointer_cast(fp)) { - gfg.push_back(ptr->inner()); - } else if (auto ptr = boost::static_pointer_cast(fp)) { - gfg.push_back( - boost::static_pointer_cast(ptr->inner())); + for (auto &f : factors) { + if (auto gf = dynamic_pointer_cast(f)) { + gfg.push_back(gf); + } else if (auto orphan = dynamic_pointer_cast(f)) { + // Ignore orphaned clique. + // TODO(dellaert): is this correct? If so explain here. + } else if (auto hc = dynamic_pointer_cast(f)) { + auto gc = hc->asGaussian(); + if (!gc) throwRuntimeError("continuousElimination", gc); + gfg.push_back(gc); } else { - // It is an orphan wrapped conditional + throwRuntimeError("continuousElimination", f); } } auto result = EliminatePreferCholesky(gfg, frontalKeys); - return {boost::make_shared(result.first), - boost::make_shared(result.second)}; + return {boost::make_shared(result.first), result.second}; } /* ************************************************************************ */ -std::pair +static std::pair> discreteElimination(const HybridGaussianFactorGraph &factors, const Ordering &frontalKeys) { DiscreteFactorGraph dfg; - for (auto &factor : factors) { - if (auto p = boost::dynamic_pointer_cast(factor)) { - dfg.push_back(p->inner()); - } else if (auto p = boost::static_pointer_cast(factor)) { - auto discrete_conditional = - boost::static_pointer_cast(p->inner()); - dfg.push_back(discrete_conditional); + for (auto &f : factors) { + if (auto dtf = dynamic_pointer_cast(f)) { + dfg.push_back(dtf); + } else if (auto orphan = 
dynamic_pointer_cast(f)) { + // Ignore orphaned clique. + // TODO(dellaert): is this correct? If so explain here. + } else if (auto hc = dynamic_pointer_cast(f)) { + auto dc = hc->asDiscrete(); + if (!dc) throwRuntimeError("continuousElimination", dc); + dfg.push_back(hc->asDiscrete()); } else { - // It is an orphan wrapper + throwRuntimeError("continuousElimination", f); } } - auto result = EliminateForMPE(dfg, frontalKeys); + // NOTE: This does sum-product. For max-product, use EliminateForMPE. + auto result = EliminateDiscrete(dfg, frontalKeys); + + return {boost::make_shared(result.first), result.second}; +} - return {boost::make_shared(result.first), - boost::make_shared(result.second)}; +/* ************************************************************************ */ +// If any GaussianFactorGraph in the decision tree contains a nullptr, convert +// that leaf to an empty GaussianFactorGraph. Needed since the DecisionTree will +// otherwise create a GFG with a single (null) factor. +// TODO(dellaert): still a mystery to me why this is needed. +GaussianFactorGraphTree removeEmpty(const GaussianFactorGraphTree &sum) { + auto emptyGaussian = [](const GaussianFactorGraph &graph) { + bool hasNull = + std::any_of(graph.begin(), graph.end(), + [](const GaussianFactor::shared_ptr &ptr) { return !ptr; }); + return hasNull ? GaussianFactorGraph() : graph; + }; + return GaussianFactorGraphTree(sum, emptyGaussian); } /* ************************************************************************ */ -std::pair +static std::pair> hybridElimination(const HybridGaussianFactorGraph &factors, const Ordering &frontalKeys, - const KeySet &continuousSeparator, + const KeyVector &continuousSeparator, const std::set &discreteSeparatorSet) { // NOTE: since we use the special JunctionTree, - // only possiblity is continuous conditioned on discrete. + // only possibility is continuous conditioned on discrete. 
DiscreteKeys discreteSeparator(discreteSeparatorSet.begin(), discreteSeparatorSet.end()); - // sum out frontals, this is the factor on the separator - GaussianMixtureFactor::Sum sum = sumFrontals(factors); + // Collect all the factors to create a set of Gaussian factor graphs in a + // decision tree indexed by all discrete keys involved. + GaussianFactorGraphTree factorGraphTree = factors.assembleGraphTree(); - // If a tree leaf contains nullptr, - // convert that leaf to an empty GaussianFactorGraph. - // Needed since the DecisionTree will otherwise create - // a GFG with a single (null) factor. - auto emptyGaussian = [](const GaussianFactorGraph &gfg) { - bool hasNull = - std::any_of(gfg.begin(), gfg.end(), - [](const GaussianFactor::shared_ptr &ptr) { return !ptr; }); - - return hasNull ? GaussianFactorGraph() : gfg; - }; - sum = GaussianMixtureFactor::Sum(sum, emptyGaussian); - - using EliminationPair = GaussianFactorGraph::EliminationResult; + // Convert factor graphs with a nullptr to an empty factor graph. + // This is done after assembly since it is non-trivial to keep track of which + // FG has a nullptr as we're looping over the factors. + factorGraphTree = removeEmpty(factorGraphTree); - KeyVector keysOfEliminated; // Not the ordering - KeyVector keysOfSeparator; // TODO(frank): Is this just (keys - ordering)? 
+ using Result = std::pair, + GaussianMixtureFactor::sharedFactor>; // This is the elimination method on the leaf nodes - auto eliminate = [&](const GaussianFactorGraph &graph) - -> GaussianFactorGraph::EliminationResult { + auto eliminate = [&](const GaussianFactorGraph &graph) -> Result { if (graph.empty()) { return {nullptr, nullptr}; } - std::pair, - boost::shared_ptr> - result = EliminatePreferCholesky(graph, frontalKeys); - - if (keysOfEliminated.empty()) { - // Initialize the keysOfEliminated to be the keys of the - // eliminated GaussianConditional - keysOfEliminated = result.first->keys(); - } - if (keysOfSeparator.empty()) { - keysOfSeparator = result.second->keys(); - } + +#ifdef HYBRID_TIMING + gttic_(hybrid_eliminate); +#endif + + auto result = EliminatePreferCholesky(graph, frontalKeys); + +#ifdef HYBRID_TIMING + gttoc_(hybrid_eliminate); +#endif + return result; }; // Perform elimination! - DecisionTree eliminationResults(sum, eliminate); + DecisionTree eliminationResults(factorGraphTree, eliminate); - // Separate out decision tree into conditionals and remaining factors. - auto pair = unzip(eliminationResults); +#ifdef HYBRID_TIMING + tictoc_print_(); + tictoc_reset_(); +#endif - const GaussianMixtureFactor::Factors &separatorFactors = pair.second; + // Separate out decision tree into conditionals and remaining factors. + GaussianMixture::Conditionals conditionals; + GaussianMixtureFactor::Factors newFactors; + std::tie(conditionals, newFactors) = unzip(eliminationResults); // Create the GaussianMixture from the conditionals - auto conditional = boost::make_shared( - frontalKeys, keysOfSeparator, discreteSeparator, pair.first); - - // If there are no more continuous parents, then we should create here a - // DiscreteFactor, with the error for each discrete choice. 
- if (keysOfSeparator.empty()) { - VectorValues empty_values; - auto factorError = [&](const GaussianFactor::shared_ptr &factor) { - if (!factor) return 0.0; // TODO(fan): does this make sense? - return exp(-factor->error(empty_values)); + auto gaussianMixture = boost::make_shared( + frontalKeys, continuousSeparator, discreteSeparator, conditionals); + + if (continuousSeparator.empty()) { + // If there are no more continuous parents, then we create a + // DiscreteFactor here, with the error for each discrete choice. + + // Integrate the probability mass in the last continuous conditional using + // the unnormalized probability q(μ;m) = exp(-error(μ;m)) at the mean. + // discrete_probability = exp(-error(μ;m)) * sqrt(det(2Ï€ Σ_m)) + auto probability = [&](const Result &pair) -> double { + static const VectorValues kEmpty; + // If the factor is not null, it has no keys, just contains the residual. + const auto &factor = pair.second; + if (!factor) return 1.0; // TODO(dellaert): not loving this. + return exp(-factor->error(kEmpty)) / pair.first->normalizationConstant(); }; - DecisionTree fdt(separatorFactors, factorError); - auto discreteFactor = - boost::make_shared(discreteSeparator, fdt); + DecisionTree probabilities(eliminationResults, probability); + return {boost::make_shared(gaussianMixture), + boost::make_shared(discreteSeparator, + probabilities)}; + } else { + // Otherwise, we create a resulting GaussianMixtureFactor on the separator, + // taking care to correct for conditional constant. + + // Correct for the normalization constant used up by the conditional + auto correct = [&](const Result &pair) -> GaussianFactor::shared_ptr { + const auto &factor = pair.second; + if (!factor) return factor; // TODO(dellaert): not loving this. 
+ auto hf = boost::dynamic_pointer_cast(factor); + if (!hf) throw std::runtime_error("Expected HessianFactor!"); + hf->constantTerm() += 2.0 * pair.first->logNormalizationConstant(); + return hf; + }; - return {boost::make_shared(conditional), - boost::make_shared(discreteFactor)}; + GaussianMixtureFactor::Factors correctedFactors(eliminationResults, + correct); + const auto mixtureFactor = boost::make_shared( + continuousSeparator, discreteSeparator, newFactors); - } else { - // Create a resulting GaussianMixtureFactor on the separator. - auto factor = boost::make_shared( - KeyVector(continuousSeparator.begin(), continuousSeparator.end()), - discreteSeparator, separatorFactors); - return {boost::make_shared(conditional), factor}; + return {boost::make_shared(gaussianMixture), + mixtureFactor}; } } + /* ************************************************************************ * Function to eliminate variables **under the following assumptions**: * 1. When the ordering is fully continuous, and the graph only contains @@ -279,7 +316,7 @@ hybridElimination(const HybridGaussianFactorGraph &factors, * eliminate a discrete variable (as specified in the ordering), the result will * be INCORRECT and there will be NO error raised. */ -std::pair // +std::pair> // EliminateHybrid(const HybridGaussianFactorGraph &factors, const Ordering &frontalKeys) { // NOTE: Because we are in the Conditional Gaussian regime there are only @@ -327,100 +364,116 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors, // However this is also the case with iSAM2, so no pressure :) // PREPROCESS: Identify the nature of the current elimination - std::unordered_map mapFromKeyToDiscreteKey; - std::set discreteSeparatorSet; - std::set discreteFrontals; - KeySet separatorKeys; - KeySet allContinuousKeys; - KeySet continuousFrontals; - KeySet continuousSeparator; + // TODO(dellaert): just check the factors: + // 1. if all factors are discrete, then we can do discrete elimination: + // 2. 
if all factors are continuous, then we can do continuous elimination: + // 3. if not, we do hybrid elimination: - // This initializes separatorKeys and mapFromKeyToDiscreteKey + // First, identify the separator keys, i.e. all keys that are not frontal. + KeySet separatorKeys; for (auto &&factor : factors) { separatorKeys.insert(factor->begin(), factor->end()); - if (!factor->isContinuous()) { - for (auto &k : factor->discreteKeys()) { - mapFromKeyToDiscreteKey[k.first] = k; - } - } } - // remove frontals from separator for (auto &k : frontalKeys) { separatorKeys.erase(k); } - // Fill in discrete frontals and continuous frontals for the end result + // Build a map from keys to DiscreteKeys + auto mapFromKeyToDiscreteKey = factors.discreteKeyMap(); + + // Fill in discrete frontals and continuous frontals. + std::set discreteFrontals; + KeySet continuousFrontals; for (auto &k : frontalKeys) { if (mapFromKeyToDiscreteKey.find(k) != mapFromKeyToDiscreteKey.end()) { discreteFrontals.insert(mapFromKeyToDiscreteKey.at(k)); } else { continuousFrontals.insert(k); - allContinuousKeys.insert(k); } } - // Fill in discrete frontals and continuous frontals for the end result + // Fill in discrete discrete separator keys and continuous separator keys. 
+ std::set discreteSeparatorSet; + KeyVector continuousSeparator; for (auto &k : separatorKeys) { if (mapFromKeyToDiscreteKey.find(k) != mapFromKeyToDiscreteKey.end()) { discreteSeparatorSet.insert(mapFromKeyToDiscreteKey.at(k)); } else { - continuousSeparator.insert(k); - allContinuousKeys.insert(k); + continuousSeparator.push_back(k); } } - // NOTE: We should really defer the product here because of pruning + // Check if we have any continuous keys: + const bool discrete_only = + continuousFrontals.empty() && continuousSeparator.empty(); - // Case 1: we are only dealing with continuous - if (mapFromKeyToDiscreteKey.empty() && !allContinuousKeys.empty()) { - return continuousElimination(factors, frontalKeys); - } + // NOTE: We should really defer the product here because of pruning - // Case 2: we are only dealing with discrete - if (allContinuousKeys.empty()) { + if (discrete_only) { + // Case 1: we are only dealing with discrete return discreteElimination(factors, frontalKeys); + } else if (mapFromKeyToDiscreteKey.empty()) { + // Case 2: we are only dealing with continuous + return continuousElimination(factors, frontalKeys); + } else { + // Case 3: We are now in the hybrid land! +#ifdef HYBRID_TIMING + tictoc_reset_(); +#endif + return hybridElimination(factors, frontalKeys, continuousSeparator, + discreteSeparatorSet); } - - // Case 3: We are now in the hybrid land! 
- return hybridElimination(factors, frontalKeys, continuousSeparator, - discreteSeparatorSet); } /* ************************************************************************ */ -void HybridGaussianFactorGraph::add(JacobianFactor &&factor) { - FactorGraph::add(boost::make_shared(std::move(factor))); -} - -/* ************************************************************************ */ -void HybridGaussianFactorGraph::add(JacobianFactor::shared_ptr factor) { - FactorGraph::add(boost::make_shared(factor)); -} +AlgebraicDecisionTree HybridGaussianFactorGraph::error( + const VectorValues &continuousValues) const { + AlgebraicDecisionTree error_tree(0.0); + + // Iterate over each factor. + for (auto &f : factors_) { + // TODO(dellaert): just use a virtual method defined in HybridFactor. + AlgebraicDecisionTree factor_error; + + if (auto gaussianMixture = dynamic_pointer_cast(f)) { + // Compute factor error and add it. + error_tree = error_tree + gaussianMixture->error(continuousValues); + } else if (auto gaussian = dynamic_pointer_cast(f)) { + // If continuous only, get the (double) error + // and add it to the error_tree + double error = gaussian->error(continuousValues); + // Add the gaussian factor error to every leaf of the error tree. + error_tree = error_tree.apply( + [error](double leaf_value) { return leaf_value + error; }); + } else if (dynamic_pointer_cast(f)) { + // If factor at `idx` is discrete-only, we skip. 
+ continue; + } else { + throwRuntimeError("HybridGaussianFactorGraph::error(VV)", f); + } + } -/* ************************************************************************ */ -void HybridGaussianFactorGraph::add(DecisionTreeFactor &&factor) { - FactorGraph::add(boost::make_shared(std::move(factor))); + return error_tree; } /* ************************************************************************ */ -void HybridGaussianFactorGraph::add(DecisionTreeFactor::shared_ptr factor) { - FactorGraph::add(boost::make_shared(factor)); +double HybridGaussianFactorGraph::probPrime(const HybridValues &values) const { + double error = this->error(values); + // NOTE: The 0.5 term is handled by each factor + return std::exp(-error); } /* ************************************************************************ */ -const Ordering HybridGaussianFactorGraph::getHybridOrdering() const { - KeySet discrete_keys = discreteKeys(); - for (auto &factor : factors_) { - for (const DiscreteKey &k : factor->discreteKeys()) { - discrete_keys.insert(k.first); - } - } - - const VariableIndex index(factors_); - Ordering ordering = Ordering::ColamdConstrainedLast( - index, KeyVector(discrete_keys.begin(), discrete_keys.end()), true); - return ordering; +AlgebraicDecisionTree HybridGaussianFactorGraph::probPrime( + const VectorValues &continuousValues) const { + AlgebraicDecisionTree error_tree = this->error(continuousValues); + AlgebraicDecisionTree prob_tree = error_tree.apply([](double error) { + // NOTE: The 0.5 term is handled by each factor + return exp(-error); + }); + return prob_tree; } } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h index 6a03625001..0db4f734b8 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.h +++ b/gtsam/hybrid/HybridGaussianFactorGraph.h @@ -12,19 +12,20 @@ /** * @file HybridGaussianFactorGraph.h * @brief Linearized Hybrid factor graph that uses type erasure - * @author Fan Jiang + * @author Fan 
Jiang, Varun Agrawal, Frank Dellaert * @date Mar 11, 2022 */ #pragma once +#include #include #include -#include #include #include #include #include +#include namespace gtsam { @@ -36,25 +37,34 @@ class HybridEliminationTree; class HybridBayesTree; class HybridJunctionTree; class DecisionTreeFactor; - class JacobianFactor; +class HybridValues; /** * @brief Main elimination function for HybridGaussianFactorGraph. - * + * * @param factors The factor graph to eliminate. * @param keys The elimination ordering. * @return The conditional on the ordering keys and the remaining factors. * @ingroup hybrid */ GTSAM_EXPORT -std::pair, HybridFactor::shared_ptr> +std::pair, boost::shared_ptr> EliminateHybrid(const HybridGaussianFactorGraph& factors, const Ordering& keys); +/** + * @brief Return a Colamd constrained ordering where the discrete keys are + * eliminated after the continuous keys. + * + * @return const Ordering + */ +GTSAM_EXPORT const Ordering +HybridOrdering(const HybridGaussianFactorGraph& graph); + /* ************************************************************************* */ template <> struct EliminationTraits { - typedef HybridFactor FactorType; ///< Type of factors in factor graph + typedef Factor FactorType; ///< Type of factors in factor graph typedef HybridGaussianFactorGraph FactorGraphType; ///< Type of the factor graph (e.g. 
///< HybridGaussianFactorGraph) @@ -68,17 +78,22 @@ struct EliminationTraits { typedef HybridJunctionTree JunctionTreeType; ///< Type of Junction tree /// The default dense elimination function static std::pair, - boost::shared_ptr > + boost::shared_ptr> DefaultEliminate(const FactorGraphType& factors, const Ordering& keys) { return EliminateHybrid(factors, keys); } + /// The default ordering generation function + static Ordering DefaultOrderingFunc( + const FactorGraphType& graph, + boost::optional variableIndex) { + return HybridOrdering(graph); + } }; /** * Hybrid Gaussian Factor Graph * ----------------------- * This is the linearized version of a hybrid factor graph. - * Everything inside needs to be hybrid factor or hybrid conditional. * * @ingroup hybrid */ @@ -99,11 +114,12 @@ class GTSAM_EXPORT HybridGaussianFactorGraph using shared_ptr = boost::shared_ptr; ///< shared_ptr to This using Values = gtsam::Values; ///< backwards compatibility - using Indices = KeyVector; ///> map from keys to values + using Indices = KeyVector; ///< map from keys to values /// @name Constructors /// @{ + /// @brief Default constructor. HybridGaussianFactorGraph() = default; /** @@ -116,67 +132,63 @@ class GTSAM_EXPORT HybridGaussianFactorGraph : Base(graph) {} /// @} + /// @name Testable + /// @{ - using Base::empty; - using Base::reserve; - using Base::size; - using Base::operator[]; - using Base::add; - using Base::push_back; - using Base::resize; - - /// Add a Jacobian factor to the factor graph. - void add(JacobianFactor&& factor); - - /// Add a Jacobian factor as a shared ptr. - void add(JacobianFactor::shared_ptr factor); + // TODO(dellaert): customize print and equals. + // void print(const std::string& s = "HybridGaussianFactorGraph", + // const KeyFormatter& keyFormatter = DefaultKeyFormatter) const + // override; + // bool equals(const This& fg, double tol = 1e-9) const override; - /// Add a DecisionTreeFactor to the factor graph. 
- void add(DecisionTreeFactor&& factor); + /// @} + /// @name Standard Interface + /// @{ - /// Add a DecisionTreeFactor as a shared ptr. - void add(DecisionTreeFactor::shared_ptr factor); + using Base::error; // Expose error(const HybridValues&) method.. /** - * Add a gaussian factor *pointer* to the internal gaussian factor graph - * @param gaussianFactor - boost::shared_ptr to the factor to add + * @brief Compute error for each discrete assignment, + * and return as a tree. + * + * Error \f$ e = \Vert x - \mu \Vert_{\Sigma} \f$. + * + * @param continuousValues Continuous values at which to compute the error. + * @return AlgebraicDecisionTree */ - template - IsGaussian push_gaussian( - const boost::shared_ptr& gaussianFactor) { - Base::push_back(boost::make_shared(gaussianFactor)); - } + AlgebraicDecisionTree error(const VectorValues& continuousValues) const; - /// Construct a factor and add (shared pointer to it) to factor graph. - template - IsGaussian emplace_gaussian(Args&&... args) { - auto factor = boost::allocate_shared( - Eigen::aligned_allocator(), std::forward(args)...); - push_gaussian(factor); - } + /** + * @brief Compute unnormalized probability \f$ P(X | M, Z) \f$ + * for each discrete assignment, and return as a tree. + * + * @param continuousValues Continuous values at which to compute the + * probability. + * @return AlgebraicDecisionTree + */ + AlgebraicDecisionTree probPrime( + const VectorValues& continuousValues) const; /** - * @brief Add a single factor shared pointer to the hybrid factor graph. - * Dynamically handles the factor type and assigns it to the correct - * underlying container. + * @brief Compute the unnormalized posterior probability for a continuous + * vector values given a specific assignment. * - * @param sharedFactor The factor to add to this factor graph. 
+ * @return double */ - void push_back(const SharedFactor& sharedFactor) { - if (auto p = boost::dynamic_pointer_cast(sharedFactor)) { - push_gaussian(p); - } else { - Base::push_back(sharedFactor); - } - } + double probPrime(const HybridValues& values) const; /** - * @brief Return a Colamd constrained ordering where the discrete keys are - * eliminated after the continuous keys. + * @brief Create a decision tree of factor graphs out of this hybrid factor + * graph. * - * @return const Ordering + * For example, if there are two mixture factors, one with a discrete key A + * and one with a discrete key B, then the decision tree will have two levels, + * one for A and one for B. The leaves of the tree will be the Gaussian + * factors that have only continuous keys. */ - const Ordering getHybridOrdering() const; + GaussianFactorGraphTree assembleGraphTree() const; + + /// @} }; } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianISAM.cpp b/gtsam/hybrid/HybridGaussianISAM.cpp index aa6b3f2664..3f63cb0896 100644 --- a/gtsam/hybrid/HybridGaussianISAM.cpp +++ b/gtsam/hybrid/HybridGaussianISAM.cpp @@ -43,7 +43,7 @@ Ordering HybridGaussianISAM::GetOrdering( HybridGaussianFactorGraph& factors, const HybridGaussianFactorGraph& newFactors) { // Get all the discrete keys from the factors - KeySet allDiscrete = factors.discreteKeys(); + const KeySet allDiscrete = factors.discreteKeySet(); // Create KeyVector with continuous keys followed by discrete keys. 
KeyVector newKeysDiscreteLast; diff --git a/gtsam/hybrid/HybridJunctionTree.cpp b/gtsam/hybrid/HybridJunctionTree.cpp index 422c200a43..a463c625f3 100644 --- a/gtsam/hybrid/HybridJunctionTree.cpp +++ b/gtsam/hybrid/HybridJunctionTree.cpp @@ -61,9 +61,15 @@ struct HybridConstructorTraversalData { parentData.junctionTreeNode->addChild(data.junctionTreeNode); // Add all the discrete keys in the hybrid factors to the current data - for (HybridFactor::shared_ptr& f : node->factors) { - for (auto& k : f->discreteKeys()) { - data.discreteKeys.insert(k.first); + for (const auto& f : node->factors) { + if (auto hf = boost::dynamic_pointer_cast(f)) { + for (auto& k : hf->discreteKeys()) { + data.discreteKeys.insert(k.first); + } + } else if (auto hf = boost::dynamic_pointer_cast(f)) { + for (auto& k : hf->discreteKeys()) { + data.discreteKeys.insert(k.first); + } } } diff --git a/gtsam/hybrid/HybridJunctionTree.h b/gtsam/hybrid/HybridJunctionTree.h index 4b0c369a82..29fa24809e 100644 --- a/gtsam/hybrid/HybridJunctionTree.h +++ b/gtsam/hybrid/HybridJunctionTree.h @@ -51,10 +51,11 @@ class HybridEliminationTree; */ class GTSAM_EXPORT HybridJunctionTree : public JunctionTree { + public: typedef JunctionTree Base; ///< Base class - typedef HybridJunctionTree This; ///< This class + typedef HybridJunctionTree This; ///< This class typedef boost::shared_ptr shared_ptr; ///< Shared pointer to this class /** diff --git a/gtsam/hybrid/HybridNonlinearFactor.cpp b/gtsam/hybrid/HybridNonlinearFactor.cpp deleted file mode 100644 index 5a1833d397..0000000000 --- a/gtsam/hybrid/HybridNonlinearFactor.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file HybridNonlinearFactor.cpp - * @date May 28, 2022 - * @author Varun Agrawal - */ - -#include - -#include - -namespace gtsam { - -/* ************************************************************************* */ -HybridNonlinearFactor::HybridNonlinearFactor( - const NonlinearFactor::shared_ptr &other) - : Base(other->keys()), inner_(other) {} - -/* ************************************************************************* */ -bool HybridNonlinearFactor::equals(const HybridFactor &lf, double tol) const { - return Base::equals(lf, tol); -} - -/* ************************************************************************* */ -void HybridNonlinearFactor::print(const std::string &s, - const KeyFormatter &formatter) const { - HybridFactor::print(s, formatter); - inner_->print("\n", formatter); -}; - -} // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactor.h b/gtsam/hybrid/HybridNonlinearFactor.h deleted file mode 100644 index 7776347b36..0000000000 --- a/gtsam/hybrid/HybridNonlinearFactor.h +++ /dev/null @@ -1,62 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file HybridNonlinearFactor.h - * @date May 28, 2022 - * @author Varun Agrawal - */ - -#pragma once - -#include -#include -#include - -namespace gtsam { - -/** - * A HybridNonlinearFactor is a layer over NonlinearFactor so that we do not - * have a diamond inheritance. 
- */ -class HybridNonlinearFactor : public HybridFactor { - private: - NonlinearFactor::shared_ptr inner_; - - public: - using Base = HybridFactor; - using This = HybridNonlinearFactor; - using shared_ptr = boost::shared_ptr; - - // Explicit conversion from a shared ptr of NonlinearFactor - explicit HybridNonlinearFactor(const NonlinearFactor::shared_ptr &other); - - /// @name Testable - /// @{ - - /// Check equality. - virtual bool equals(const HybridFactor &lf, double tol) const override; - - /// GTSAM print utility. - void print( - const std::string &s = "HybridNonlinearFactor\n", - const KeyFormatter &formatter = DefaultKeyFormatter) const override; - - /// @} - - NonlinearFactor::shared_ptr inner() const { return inner_; } - - /// Linearize to a HybridGaussianFactor at the linearization point `c`. - boost::shared_ptr linearize(const Values &c) const { - return boost::make_shared(inner_->linearize(c)); - } -}; -} // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp index 3a3bf720b7..71b064eb60 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp @@ -16,21 +16,14 @@ * @date May 28, 2022 */ +#include +#include #include +#include +#include namespace gtsam { -/* ************************************************************************* */ -void HybridNonlinearFactorGraph::add( - boost::shared_ptr factor) { - FactorGraph::add(boost::make_shared(factor)); -} - -/* ************************************************************************* */ -void HybridNonlinearFactorGraph::add(boost::shared_ptr factor) { - FactorGraph::add(boost::make_shared(factor)); -} - /* ************************************************************************* */ void HybridNonlinearFactorGraph::print(const std::string& s, const KeyFormatter& keyFormatter) const { @@ -50,47 +43,38 @@ void HybridNonlinearFactorGraph::print(const std::string& s, /* 
************************************************************************* */ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize( const Values& continuousValues) const { + using boost::dynamic_pointer_cast; + // create an empty linear FG auto linearFG = boost::make_shared(); linearFG->reserve(size()); // linearize all hybrid factors - for (auto&& factor : factors_) { + for (auto& f : factors_) { // First check if it is a valid factor - if (factor) { - // Check if the factor is a hybrid factor. - // It can be either a nonlinear MixtureFactor or a linear - // GaussianMixtureFactor. - if (factor->isHybrid()) { - // Check if it is a nonlinear mixture factor - if (auto nlmf = boost::dynamic_pointer_cast(factor)) { - linearFG->push_back(nlmf->linearize(continuousValues)); - } else { - linearFG->push_back(factor); - } - - // Now check if the factor is a continuous only factor. - } else if (factor->isContinuous()) { - // In this case, we check if factor->inner() is nonlinear since - // HybridFactors wrap over continuous factors. - auto nlhf = boost::dynamic_pointer_cast(factor); - if (auto nlf = - boost::dynamic_pointer_cast(nlhf->inner())) { - auto hgf = boost::make_shared( - nlf->linearize(continuousValues)); - linearFG->push_back(hgf); - } else { - linearFG->push_back(factor); - } - // Finally if nothing else, we are discrete-only which doesn't need - // lineariztion. - } else { - linearFG->push_back(factor); - } - - } else { + if (!f) { + // TODO(dellaert): why? 
linearFG->push_back(GaussianFactor::shared_ptr()); + continue; + } + // Check if it is a nonlinear mixture factor + if (auto mf = dynamic_pointer_cast(f)) { + const GaussianMixtureFactor::shared_ptr& gmf = + mf->linearize(continuousValues); + linearFG->push_back(gmf); + } else if (auto nlf = dynamic_pointer_cast(f)) { + const GaussianFactor::shared_ptr& gf = nlf->linearize(continuousValues); + linearFG->push_back(gf); + } else if (dynamic_pointer_cast(f)) { + // If discrete-only: doesn't need linearization. + linearFG->push_back(f); + } else { + auto& fr = *f; + throw std::invalid_argument( + std::string("HybridNonlinearFactorGraph::linearize: factor type " + "not handled: ") + + demangle(typeid(fr).name())); } } return linearFG; diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.h b/gtsam/hybrid/HybridNonlinearFactorGraph.h index b48e8bb5c3..0cc87d5043 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.h +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.h @@ -18,17 +18,12 @@ #pragma once -#include #include -#include -#include -#include -#include -#include -#include namespace gtsam { +class HybridGaussianFactorGraph; + /** * Nonlinear Hybrid Factor Graph * ----------------------- @@ -37,21 +32,6 @@ namespace gtsam { */ class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph { protected: - /// Check if FACTOR type is derived from NonlinearFactor. - template - using IsNonlinear = typename std::enable_if< - std::is_base_of::value>::type; - - /// Check if T has a value_type derived from FactorType. - template - using HasDerivedValueType = typename std::enable_if< - std::is_base_of::value>::type; - - /// Check if T has a pointer type derived from FactorType. 
- template - using HasDerivedElementType = typename std::enable_if::value>::type; - public: using Base = HybridFactorGraph; using This = HybridNonlinearFactorGraph; ///< this class @@ -75,76 +55,18 @@ class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph { : Base(graph) {} /// @} - - // Allow use of selected FactorGraph methods: - using Base::empty; - using Base::reserve; - using Base::size; - using Base::operator[]; - using Base::add; - using Base::resize; - - /** - * Add a nonlinear factor *pointer* to the internal nonlinear factor graph - * @param nonlinearFactor - boost::shared_ptr to the factor to add - */ - template - IsNonlinear push_nonlinear( - const boost::shared_ptr& nonlinearFactor) { - Base::push_back(boost::make_shared(nonlinearFactor)); - } - - /// Construct a factor and add (shared pointer to it) to factor graph. - template - IsNonlinear emplace_nonlinear(Args&&... args) { - auto factor = boost::allocate_shared( - Eigen::aligned_allocator(), std::forward(args)...); - push_nonlinear(factor); - } - - /** - * @brief Add a single factor shared pointer to the hybrid factor graph. - * Dynamically handles the factor type and assigns it to the correct - * underlying container. - * - * @tparam FACTOR The factor type template - * @param sharedFactor The factor to add to this factor graph. - */ - template - void push_back(const boost::shared_ptr& sharedFactor) { - if (auto p = boost::dynamic_pointer_cast(sharedFactor)) { - push_nonlinear(p); - } else { - Base::push_back(sharedFactor); - } - } - - /** - * Push back many factors as shared_ptr's in a container (factors are not - * copied) - */ - template - HasDerivedElementType push_back(const CONTAINER& container) { - Base::push_back(container.begin(), container.end()); - } - - /// Push back non-pointer objects in a container (factors are copied). 
- template - HasDerivedValueType push_back(const CONTAINER& container) { - Base::push_back(container.begin(), container.end()); - } - - /// Add a nonlinear factor as a shared ptr. - void add(boost::shared_ptr factor); - - /// Add a discrete factor as a shared ptr. - void add(boost::shared_ptr factor); + /// @name Constructors + /// @{ /// Print the factor graph. void print( const std::string& s = "HybridNonlinearFactorGraph", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; + /// @} + /// @name Standard Interface + /// @{ + /** * @brief Linearize all the continuous factors in the * HybridNonlinearFactorGraph. @@ -152,8 +74,9 @@ class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph { * @param continuousValues: Dictionary of continuous values. * @return HybridGaussianFactorGraph::shared_ptr */ - HybridGaussianFactorGraph::shared_ptr linearize( + boost::shared_ptr linearize( const Values& continuousValues) const; + /// @} }; template <> diff --git a/gtsam/hybrid/HybridNonlinearISAM.cpp b/gtsam/hybrid/HybridNonlinearISAM.cpp index 57e0daf8da..b7a5a8eeb3 100644 --- a/gtsam/hybrid/HybridNonlinearISAM.cpp +++ b/gtsam/hybrid/HybridNonlinearISAM.cpp @@ -39,6 +39,7 @@ void HybridNonlinearISAM::update(const HybridNonlinearFactorGraph& newFactors, if (newFactors.size() > 0) { // Reorder and relinearize every reorderInterval updates if (reorderInterval_ > 0 && ++reorderCounter_ >= reorderInterval_) { + // TODO(Varun) Relinearization doesn't take into account pruning reorder_relinearize(); reorderCounter_ = 0; } @@ -99,9 +100,11 @@ void HybridNonlinearISAM::print(const string& s, const KeyFormatter& keyFormatter) const { cout << s << "ReorderInterval: " << reorderInterval_ << " Current Count: " << reorderCounter_ << endl; - isam_.print("HybridGaussianISAM:\n", keyFormatter); + std::cout << "HybridGaussianISAM:" << std::endl; + isam_.print("", keyFormatter); linPoint_.print("Linearization Point:\n", keyFormatter); - 
factors_.print("Nonlinear Graph:\n", keyFormatter); + std::cout << "Nonlinear Graph:" << std::endl; + factors_.print("", keyFormatter); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/HybridNonlinearISAM.h b/gtsam/hybrid/HybridNonlinearISAM.h index 47aa81c558..53bacb0fff 100644 --- a/gtsam/hybrid/HybridNonlinearISAM.h +++ b/gtsam/hybrid/HybridNonlinearISAM.h @@ -90,7 +90,7 @@ class GTSAM_EXPORT HybridNonlinearISAM { const Values& getLinearizationPoint() const { return linPoint_; } /** Return the current discrete assignment */ - const DiscreteValues& getAssignment() const { return assignment_; } + const DiscreteValues& assignment() const { return assignment_; } /** get underlying nonlinear graph */ const HybridNonlinearFactorGraph& getFactorsUnsafe() const { diff --git a/gtsam/hybrid/HybridSmoother.cpp b/gtsam/hybrid/HybridSmoother.cpp index 07a7a4e77a..35dd5f88bc 100644 --- a/gtsam/hybrid/HybridSmoother.cpp +++ b/gtsam/hybrid/HybridSmoother.cpp @@ -46,7 +46,7 @@ void HybridSmoother::update(HybridGaussianFactorGraph graph, } // Add the partial bayes net to the posterior bayes net. 
- hybridBayesNet_.push_back(*bayesNetFragment); + hybridBayesNet_.add(*bayesNetFragment); } /* ************************************************************************* */ @@ -100,8 +100,7 @@ HybridSmoother::addConditionals(const HybridGaussianFactorGraph &originalGraph, /* ************************************************************************* */ GaussianMixture::shared_ptr HybridSmoother::gaussianMixture( size_t index) const { - return boost::dynamic_pointer_cast( - hybridBayesNet_.at(index)); + return hybridBayesNet_.at(index)->asMixture(); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/HybridValues.h b/gtsam/hybrid/HybridValues.h index 4928f9384f..94365db75a 100644 --- a/gtsam/hybrid/HybridValues.h +++ b/gtsam/hybrid/HybridValues.h @@ -37,11 +37,14 @@ namespace gtsam { */ class GTSAM_EXPORT HybridValues { private: - // DiscreteValue stored the discrete components of the HybridValues. + /// Continuous multi-dimensional vectors for \class GaussianFactor. + VectorValues continuous_; + + /// Discrete values for \class DiscreteFactor. DiscreteValues discrete_; - // VectorValue stored the continuous components of the HybridValues. - VectorValues continuous_; + /// Continuous, differentiable manifold values for \class NonlinearFactor. + Values nonlinear_; public: /// @name Standard Constructors @@ -51,8 +54,13 @@ class GTSAM_EXPORT HybridValues { HybridValues() = default; /// Construct from DiscreteValues and VectorValues. - HybridValues(const DiscreteValues& dv, const VectorValues& cv) - : discrete_(dv), continuous_(cv){}; + HybridValues(const VectorValues& cv, const DiscreteValues& dv) + : continuous_(cv), discrete_(dv){}; + + /// Construct from all values types. 
+ HybridValues(const VectorValues& cv, const DiscreteValues& dv, + const Values& v) + : continuous_(cv), discrete_(dv), nonlinear_(v){}; /// @} /// @name Testable @@ -62,41 +70,45 @@ class GTSAM_EXPORT HybridValues { void print(const std::string& s = "HybridValues", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { std::cout << s << ": \n"; - discrete_.print(" Discrete", keyFormatter); // print discrete components continuous_.print(" Continuous", - keyFormatter); // print continuous components + keyFormatter); // print continuous components + discrete_.print(" Discrete", keyFormatter); // print discrete components }; /// equals required by Testable for unit testing bool equals(const HybridValues& other, double tol = 1e-9) const { - return discrete_.equals(other.discrete_, tol) && - continuous_.equals(other.continuous_, tol); + return continuous_.equals(other.continuous_, tol) && + discrete_.equals(other.discrete_, tol); } /// @} /// @name Interface /// @{ - /// Return the discrete MPE assignment - DiscreteValues discrete() const { return discrete_; } + /// Return the multi-dimensional vector values. + const VectorValues& continuous() const { return continuous_; } - /// Return the delta update for the continuous vectors - VectorValues continuous() const { return continuous_; } + /// Return the discrete values. + const DiscreteValues& discrete() const { return discrete_; } - /// Check whether a variable with key \c j exists in DiscreteValue. - bool existsDiscrete(Key j) { return (discrete_.find(j) != discrete_.end()); }; + /// Return the nonlinear values. + const Values& nonlinear() const { return nonlinear_; } - /// Check whether a variable with key \c j exists in VectorValue. + /// Check whether a variable with key \c j exists in VectorValues. bool existsVector(Key j) { return continuous_.exists(j); }; - /// Check whether a variable with key \c j exists. 
- bool exists(Key j) { return existsDiscrete(j) || existsVector(j); }; + /// Check whether a variable with key \c j exists in DiscreteValues. + bool existsDiscrete(Key j) { return (discrete_.find(j) != discrete_.end()); }; - /** Insert a discrete \c value with key \c j. Replaces the existing value if - * the key \c j is already used. - * @param value The vector to be inserted. - * @param j The index with which the value will be associated. */ - void insert(Key j, int value) { discrete_[j] = value; }; + /// Check whether a variable with key \c j exists in values. + bool existsNonlinear(Key j) { + return nonlinear_.exists(j); + }; + + /// Check whether a variable with key \c j exists. + bool exists(Key j) { + return existsVector(j) || existsDiscrete(j) || existsNonlinear(j); + }; /** Insert a vector \c value with key \c j. Throws an invalid_argument * exception if the key \c j is already used. @@ -104,20 +116,102 @@ class GTSAM_EXPORT HybridValues { * @param j The index with which the value will be associated. */ void insert(Key j, const Vector& value) { continuous_.insert(j, value); } - // TODO(Shangjie)- update() and insert_or_assign() , similar to Values.h + /** Insert a discrete \c value with key \c j. Replaces the existing value if + * the key \c j is already used. + * @param value The vector to be inserted. + * @param j The index with which the value will be associated. */ + void insert(Key j, size_t value) { discrete_[j] = value; }; + + /// insert_or_assign() , similar to Values.h + void insert_or_assign(Key j, const Vector& value) { + continuous_.insert_or_assign(j, value); + } + + /// insert_or_assign() , similar to Values.h + void insert_or_assign(Key j, size_t value) { + discrete_[j] = value; + } + + /** Insert all continuous values from \c values. Throws an invalid_argument + * exception if any keys to be inserted are already used. 
*/ + HybridValues& insert(const VectorValues& values) { + continuous_.insert(values); + return *this; + } + + /** Insert all discrete values from \c values. Throws an invalid_argument + * exception if any keys to be inserted are already used. */ + HybridValues& insert(const DiscreteValues& values) { + discrete_.insert(values); + return *this; + } + + /** Insert all values from \c values. Throws an invalid_argument + * exception if any keys to be inserted are already used. */ + HybridValues& insert(const Values& values) { + nonlinear_.insert(values); + return *this; + } + + /** Insert all values from \c values. Throws an invalid_argument exception if + * any keys to be inserted are already used. */ + HybridValues& insert(const HybridValues& values) { + continuous_.insert(values.continuous()); + discrete_.insert(values.discrete()); + nonlinear_.insert(values.nonlinear()); + return *this; + } /** - * Read/write access to the discrete value with key \c j, throws + * Read/write access to the vector value with key \c j, throws * std::out_of_range if \c j does not exist. */ - size_t& atDiscrete(Key j) { return discrete_.at(j); }; + Vector& at(Key j) { return continuous_.at(j); }; /** - * Read/write access to the vector value with key \c j, throws + * Read/write access to the discrete value with key \c j, throws * std::out_of_range if \c j does not exist. */ - Vector& at(Key j) { return continuous_.at(j); }; + size_t& atDiscrete(Key j) { return discrete_.at(j); }; + /** For all key/value pairs in \c values, replace continuous values with + * corresponding keys in this object with those in \c values. Throws + * std::out_of_range if any keys in \c values are not present in this object. + */ + HybridValues& update(const VectorValues& values) { + continuous_.update(values); + return *this; + } + + /** For all key/value pairs in \c values, replace discrete values with + * corresponding keys in this object with those in \c values. 
Throws + * std::out_of_range if any keys in \c values are not present in this object. + */ + HybridValues& update(const DiscreteValues& values) { + discrete_.update(values); + return *this; + } + + /** For all key/value pairs in \c values, replace all values with + * corresponding keys in this object with those in \c values. Throws + * std::out_of_range if any keys in \c values are not present in this object. + */ + HybridValues& update(const HybridValues& values) { + continuous_.update(values.continuous()); + discrete_.update(values.discrete()); + return *this; + } + + /// Extract continuous values with given keys. + VectorValues continuousSubset(const KeyVector& keys) const { + VectorValues measurements; + for (const auto& key : keys) { + measurements.insert(key, continuous_.at(key)); + } + return measurements; + } + + /// @} /// @name Wrapper support /// @{ @@ -130,8 +224,8 @@ class GTSAM_EXPORT HybridValues { std::string html( const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { std::stringstream ss; - ss << this->discrete_.html(keyFormatter); ss << this->continuous_.html(keyFormatter); + ss << this->discrete_.html(keyFormatter); return ss.str(); }; diff --git a/gtsam/hybrid/MixtureFactor.h b/gtsam/hybrid/MixtureFactor.h index 5e7337d0cc..220f1bdbf9 100644 --- a/gtsam/hybrid/MixtureFactor.h +++ b/gtsam/hybrid/MixtureFactor.h @@ -21,8 +21,9 @@ #include #include -#include +#include #include +#include #include #include @@ -86,11 +87,11 @@ class MixtureFactor : public HybridFactor { * elements based on the number of discrete keys and the cardinality of the * keys, so that the decision tree is constructed appropriately. * - * @tparam FACTOR The type of the factor shared pointers being passed in. Will - * be typecast to NonlinearFactor shared pointers. + * @tparam FACTOR The type of the factor shared pointers being passed in. + * Will be typecast to NonlinearFactor shared pointers. * @param keys Vector of keys for continuous factors. 
* @param discreteKeys Vector of discrete keys. - * @param factors Vector of shared pointers to factors. + * @param factors Vector of nonlinear factors. * @param normalized Flag indicating if the factor error is already * normalized. */ @@ -107,8 +108,12 @@ class MixtureFactor : public HybridFactor { std::copy(f->keys().begin(), f->keys().end(), std::inserter(factor_keys_set, factor_keys_set.end())); - nonlinear_factors.push_back( - boost::dynamic_pointer_cast(f)); + if (auto nf = boost::dynamic_pointer_cast(f)) { + nonlinear_factors.push_back(nf); + } else { + throw std::runtime_error( + "Factors passed into MixtureFactor need to be nonlinear!"); + } } factors_ = Factors(discreteKeys, nonlinear_factors); @@ -121,27 +126,60 @@ class MixtureFactor : public HybridFactor { ~MixtureFactor() = default; + /** + * @brief Compute error of the MixtureFactor as a tree. + * + * @param continuousValues The continuous values for which to compute the + * error. + * @return AlgebraicDecisionTree A decision tree with the same keys + * as the factor, and leaf values as the error. + */ + AlgebraicDecisionTree error(const Values& continuousValues) const { + // functor to convert from sharedFactor to double error value. + auto errorFunc = [continuousValues](const sharedFactor& factor) { + return factor->error(continuousValues); + }; + DecisionTree errorTree(factors_, errorFunc); + return errorTree; + } + /** * @brief Compute error of factor given both continuous and discrete values. * - * @param continuousVals The continuous Values. - * @param discreteVals The discrete Values. + * @param continuousValues The continuous Values. + * @param discreteValues The discrete Values. * @return double The error of this factor. */ - double error(const Values& continuousVals, - const DiscreteValues& discreteVals) const { - // Retrieve the factor corresponding to the assignment in discreteVals. 
- auto factor = factors_(discreteVals); + double error(const Values& continuousValues, + const DiscreteValues& discreteValues) const { + // Retrieve the factor corresponding to the assignment in discreteValues. + auto factor = factors_(discreteValues); // Compute the error for the selected factor - const double factorError = factor->error(continuousVals); + const double factorError = factor->error(continuousValues); if (normalized_) return factorError; - return factorError + - this->nonlinearFactorLogNormalizingConstant(factor, continuousVals); + return factorError + this->nonlinearFactorLogNormalizingConstant( + factor, continuousValues); } + /** + * @brief Compute error of factor given hybrid values. + * + * @param values The continuous Values and the discrete assignment. + * @return double The error of this factor. + */ + double error(const HybridValues& values) const override { + return error(values.nonlinear(), values.discrete()); + } + + /** + * @brief Get the dimension of the factor (number of rows on linearization). + * Returns the dimension of the first component factor. + * @return size_t + */ size_t dim() const { - // TODO(Varun) - throw std::runtime_error("MixtureFactor::dim not implemented."); + const auto assignments = DiscreteValues::CartesianProduct(discreteKeys_); + auto factor = factors_(assignments.at(0)); + return factor->dim(); } /// Testable @@ -149,7 +187,7 @@ class MixtureFactor : public HybridFactor { /// print to stdout void print( - const std::string& s = "MixtureFactor", + const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << (s.empty() ? "" : s + " "); Base::print("", keyFormatter); @@ -192,17 +230,18 @@ class MixtureFactor : public HybridFactor { /// Linearize specific nonlinear factors based on the assignment in /// discreteValues. 
GaussianFactor::shared_ptr linearize( - const Values& continuousVals, const DiscreteValues& discreteVals) const { - auto factor = factors_(discreteVals); - return factor->linearize(continuousVals); + const Values& continuousValues, + const DiscreteValues& discreteValues) const { + auto factor = factors_(discreteValues); + return factor->linearize(continuousValues); } /// Linearize all the continuous factors to get a GaussianMixtureFactor. boost::shared_ptr linearize( - const Values& continuousVals) const { + const Values& continuousValues) const { // functional to linearize each factor in the decision tree - auto linearizeDT = [continuousVals](const sharedFactor& factor) { - return factor->linearize(continuousVals); + auto linearizeDT = [continuousValues](const sharedFactor& factor) { + return factor->linearize(continuousValues); }; DecisionTree linearized_factors( diff --git a/gtsam/hybrid/hybrid.i b/gtsam/hybrid/hybrid.i index 86029a48aa..7cc93d4b0f 100644 --- a/gtsam/hybrid/hybrid.i +++ b/gtsam/hybrid/hybrid.i @@ -6,16 +6,30 @@ namespace gtsam { #include class HybridValues { - gtsam::DiscreteValues discrete() const; gtsam::VectorValues continuous() const; + gtsam::DiscreteValues discrete() const; + HybridValues(); - HybridValues(const gtsam::DiscreteValues &dv, const gtsam::VectorValues &cv); + HybridValues(const gtsam::VectorValues &cv, const gtsam::DiscreteValues &dv); void print(string s = "HybridValues", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; bool equals(const gtsam::HybridValues& other, double tol) const; + void insert(gtsam::Key j, int value); void insert(gtsam::Key j, const gtsam::Vector& value); + + void insert_or_assign(gtsam::Key j, const gtsam::Vector& value); + void insert_or_assign(gtsam::Key j, size_t value); + + void insert(const gtsam::VectorValues& values); + void insert(const gtsam::DiscreteValues& values); + void insert(const gtsam::HybridValues& values); + + void update(const gtsam::VectorValues& values); 
+ void update(const gtsam::DiscreteValues& values); + void update(const gtsam::HybridValues& values); + size_t& atDiscrete(gtsam::Key j); gtsam::Vector& at(gtsam::Key j); }; @@ -29,6 +43,15 @@ virtual class HybridFactor { bool empty() const; size_t size() const; gtsam::KeyVector keys() const; + + // Standard interface: + double error(const gtsam::HybridValues &values) const; + bool isDiscrete() const; + bool isContinuous() const; + bool isHybrid() const; + size_t nrContinuous() const; + gtsam::DiscreteKeys discreteKeys() const; + gtsam::KeyVector continuousKeys() const; }; #include @@ -39,22 +62,22 @@ virtual class HybridConditional { bool equals(const gtsam::HybridConditional& other, double tol = 1e-9) const; size_t nrFrontals() const; size_t nrParents() const; - gtsam::Factor* inner(); -}; -#include -virtual class HybridDiscreteFactor { - HybridDiscreteFactor(gtsam::DecisionTreeFactor dtf); - void print(string s = "HybridDiscreteFactor\n", - const gtsam::KeyFormatter& keyFormatter = - gtsam::DefaultKeyFormatter) const; - bool equals(const gtsam::HybridDiscreteFactor& other, double tol = 1e-9) const; + // Standard interface: + double logNormalizationConstant() const; + double logProbability(const gtsam::HybridValues& values) const; + double evaluate(const gtsam::HybridValues& values) const; + double operator()(const gtsam::HybridValues& values) const; + gtsam::GaussianMixture* asMixture() const; + gtsam::GaussianConditional* asGaussian() const; + gtsam::DiscreteConditional* asDiscrete() const; gtsam::Factor* inner(); + double error(const gtsam::HybridValues& values) const; }; #include class GaussianMixtureFactor : gtsam::HybridFactor { - static GaussianMixtureFactor FromFactors( + GaussianMixtureFactor( const gtsam::KeyVector& continuousKeys, const gtsam::DiscreteKeys& discreteKeys, const std::vector& factorsList); @@ -66,12 +89,13 @@ class GaussianMixtureFactor : gtsam::HybridFactor { #include class GaussianMixture : gtsam::HybridFactor { - static GaussianMixture 
FromConditionals( - const gtsam::KeyVector& continuousFrontals, - const gtsam::KeyVector& continuousParents, - const gtsam::DiscreteKeys& discreteParents, - const std::vector& - conditionalsList); + GaussianMixture(const gtsam::KeyVector& continuousFrontals, + const gtsam::KeyVector& continuousParents, + const gtsam::DiscreteKeys& discreteParents, + const std::vector& + conditionalsList); + + gtsam::GaussianMixtureFactor* likelihood(const gtsam::VectorValues &frontals) const; void print(string s = "GaussianMixture\n", const gtsam::KeyFormatter& keyFormatter = @@ -87,7 +111,6 @@ class HybridBayesTreeClique { // double evaluate(const gtsam::HybridValues& values) const; }; -#include class HybridBayesTree { HybridBayesTree(); void print(string s = "HybridBayesTree\n", @@ -105,14 +128,30 @@ class HybridBayesTree { gtsam::DefaultKeyFormatter) const; }; +#include class HybridBayesNet { HybridBayesNet(); - void add(const gtsam::HybridConditional& s); + void push_back(const gtsam::GaussianMixture* s); + void push_back(const gtsam::GaussianConditional* s); + void push_back(const gtsam::DiscreteConditional* s); + bool empty() const; size_t size() const; gtsam::KeySet keys() const; const gtsam::HybridConditional* at(size_t i) const; + + // Standard interface: + double logProbability(const gtsam::HybridValues& values) const; + double evaluate(const gtsam::HybridValues& values) const; + double error(const gtsam::HybridValues& values) const; + + gtsam::HybridGaussianFactorGraph toFactorGraph( + const gtsam::VectorValues& measurements) const; + gtsam::HybridValues optimize() const; + gtsam::HybridValues sample(const gtsam::HybridValues &given) const; + gtsam::HybridValues sample() const; + void print(string s = "HybridBayesNet\n", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; @@ -139,9 +178,8 @@ class HybridGaussianFactorGraph { void push_back(const gtsam::HybridBayesNet& bayesNet); void push_back(const gtsam::HybridBayesTree& bayesTree); void 
push_back(const gtsam::GaussianMixtureFactor* gmm); - - void add(gtsam::DecisionTreeFactor* factor); - void add(gtsam::JacobianFactor* factor); + void push_back(gtsam::DecisionTreeFactor* factor); + void push_back(gtsam::JacobianFactor* factor); bool empty() const; void remove(size_t i); @@ -152,6 +190,10 @@ class HybridGaussianFactorGraph { void print(string s = "") const; bool equals(const gtsam::HybridGaussianFactorGraph& fg, double tol = 1e-9) const; + // evaluation + double error(const gtsam::HybridValues& values) const; + double probPrime(const gtsam::HybridValues& values) const; + gtsam::HybridBayesNet* eliminateSequential(); gtsam::HybridBayesNet* eliminateSequential( gtsam::Ordering::OrderingType type); @@ -178,9 +220,7 @@ class HybridNonlinearFactorGraph { HybridNonlinearFactorGraph(const gtsam::HybridNonlinearFactorGraph& graph); void push_back(gtsam::HybridFactor* factor); void push_back(gtsam::NonlinearFactor* factor); - void push_back(gtsam::HybridDiscreteFactor* factor); - void add(gtsam::NonlinearFactor* factor); - void add(gtsam::DiscreteFactor* factor); + void push_back(gtsam::DiscreteFactor* factor); gtsam::HybridGaussianFactorGraph linearize(const gtsam::Values& continuousValues) const; bool empty() const; @@ -196,22 +236,24 @@ class HybridNonlinearFactorGraph { #include class MixtureFactor : gtsam::HybridFactor { - MixtureFactor(const gtsam::KeyVector& keys, const gtsam::DiscreteKeys& discreteKeys, - const gtsam::DecisionTree& factors, bool normalized = false); + MixtureFactor( + const gtsam::KeyVector& keys, const gtsam::DiscreteKeys& discreteKeys, + const gtsam::DecisionTree& factors, + bool normalized = false); template MixtureFactor(const gtsam::KeyVector& keys, const gtsam::DiscreteKeys& discreteKeys, const std::vector& factors, bool normalized = false); - double error(const gtsam::Values& continuousVals, - const gtsam::DiscreteValues& discreteVals) const; + double error(const gtsam::Values& continuousValues, + const gtsam::DiscreteValues& 
discreteValues) const; double nonlinearFactorLogNormalizingConstant(const gtsam::NonlinearFactor* factor, const gtsam::Values& values) const; GaussianMixtureFactor* linearize( - const gtsam::Values& continuousVals) const; + const gtsam::Values& continuousValues) const; void print(string s = "MixtureFactor\n", const gtsam::KeyFormatter& keyFormatter = diff --git a/gtsam/hybrid/tests/Switching.h b/gtsam/hybrid/tests/Switching.h index f9e1916d07..ab7a9a84f6 100644 --- a/gtsam/hybrid/tests/Switching.h +++ b/gtsam/hybrid/tests/Switching.h @@ -57,7 +57,7 @@ inline HybridGaussianFactorGraph::shared_ptr makeSwitchingChain( // keyFunc(1) to keyFunc(n+1) for (size_t t = 1; t < n; t++) { - hfg.add(GaussianMixtureFactor::FromFactors( + hfg.add(GaussianMixtureFactor( {keyFunc(t), keyFunc(t + 1)}, {{dKeyFunc(t), 2}}, {boost::make_shared(keyFunc(t), I_3x3, keyFunc(t + 1), I_3x3, Z_3x1), @@ -130,10 +130,14 @@ struct Switching { * @param K The total number of timesteps. * @param between_sigma The stddev between poses. * @param prior_sigma The stddev on priors (also used for measurements). + * @param measurements Vector of measurements for each timestep. */ Switching(size_t K, double between_sigma = 1.0, double prior_sigma = 0.1, - std::vector measurements = {}) + std::vector measurements = {}, + std::string discrete_transition_prob = "1/2 3/2") : K(K) { + using noiseModel::Isotropic; + // Create DiscreteKeys for binary K modes. for (size_t k = 0; k < K; k++) { modes.emplace_back(M(k), 2); @@ -147,10 +151,9 @@ struct Switching { } // Create hybrid factor graph. - // Add a prior on X(1). - auto prior = boost::make_shared>( - X(0), measurements.at(0), noiseModel::Isotropic::Sigma(1, prior_sigma)); - nonlinearFactorGraph.push_nonlinear(prior); + // Add a prior on X(0). + nonlinearFactorGraph.emplace_shared>( + X(0), measurements.at(0), Isotropic::Sigma(1, prior_sigma)); // Add "motion models". 
for (size_t k = 0; k < K - 1; k++) { @@ -160,19 +163,19 @@ struct Switching { for (auto &&f : motion_models) { components.push_back(boost::dynamic_pointer_cast(f)); } - nonlinearFactorGraph.emplace_hybrid( + nonlinearFactorGraph.emplace_shared( keys, DiscreteKeys{modes[k]}, components); } // Add measurement factors - auto measurement_noise = noiseModel::Isotropic::Sigma(1, prior_sigma); + auto measurement_noise = Isotropic::Sigma(1, prior_sigma); for (size_t k = 1; k < K; k++) { - nonlinearFactorGraph.emplace_nonlinear>( + nonlinearFactorGraph.emplace_shared>( X(k), measurements.at(k), measurement_noise); } // Add "mode chain" - addModeChain(&nonlinearFactorGraph); + addModeChain(&nonlinearFactorGraph, discrete_transition_prob); // Create the linearization point. for (size_t k = 0; k < K; k++) { @@ -201,14 +204,13 @@ struct Switching { * * @param fg The nonlinear factor graph to which the mode chain is added. */ - void addModeChain(HybridNonlinearFactorGraph *fg) { - auto prior = boost::make_shared(modes[0], "1/1"); - fg->push_discrete(prior); + void addModeChain(HybridNonlinearFactorGraph *fg, + std::string discrete_transition_prob = "1/2 3/2") { + fg->emplace_shared(modes[0], "1/1"); for (size_t k = 0; k < K - 2; k++) { auto parents = {modes[k]}; - auto conditional = boost::make_shared( - modes[k + 1], parents, "1/2 3/2"); - fg->push_discrete(conditional); + fg->emplace_shared(modes[k + 1], parents, + discrete_transition_prob); } } @@ -218,14 +220,13 @@ struct Switching { * * @param fg The gaussian factor graph to which the mode chain is added. 
*/ - void addModeChain(HybridGaussianFactorGraph *fg) { - auto prior = boost::make_shared(modes[0], "1/1"); - fg->push_discrete(prior); + void addModeChain(HybridGaussianFactorGraph *fg, + std::string discrete_transition_prob = "1/2 3/2") { + fg->emplace_shared(modes[0], "1/1"); for (size_t k = 0; k < K - 2; k++) { auto parents = {modes[k]}; - auto conditional = boost::make_shared( - modes[k + 1], parents, "1/2 3/2"); - fg->push_discrete(conditional); + fg->emplace_shared(modes[k + 1], parents, + discrete_transition_prob); } } }; diff --git a/gtsam/hybrid/tests/TinyHybridExample.h b/gtsam/hybrid/tests/TinyHybridExample.h new file mode 100644 index 0000000000..0ef8955c4f --- /dev/null +++ b/gtsam/hybrid/tests/TinyHybridExample.h @@ -0,0 +1,87 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2023, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * @file TinyHybridExample.h + * @date December, 2022 + * @author Frank Dellaert + */ + +#include +#include +#include +#pragma once + +namespace gtsam { +namespace tiny { + +using symbol_shorthand::M; +using symbol_shorthand::X; +using symbol_shorthand::Z; + +// Create mode key: 0 is low-noise, 1 is high-noise. +const DiscreteKey mode{M(0), 2}; + +/** + * Create a tiny two variable hybrid model which represents + * the generative probability P(z,x,mode) = P(z|x,mode)P(x)P(mode). + * num_measurements is the number of measurements of the continuous variable x0. + * If manyModes is true, then we introduce one mode per measurement. 
+ */ +inline HybridBayesNet createHybridBayesNet(size_t num_measurements = 1, + bool manyModes = false) { + HybridBayesNet bayesNet; + + // Create Gaussian mixture z_i = x0 + noise for each measurement. + for (size_t i = 0; i < num_measurements; i++) { + const auto mode_i = manyModes ? DiscreteKey{M(i), 2} : mode; + bayesNet.emplace_back( + new GaussianMixture({Z(i)}, {X(0)}, {mode_i}, + {GaussianConditional::sharedMeanAndStddev( + Z(i), I_1x1, X(0), Z_1x1, 0.5), + GaussianConditional::sharedMeanAndStddev( + Z(i), I_1x1, X(0), Z_1x1, 3)})); + } + + // Create prior on X(0). + bayesNet.push_back( + GaussianConditional::sharedMeanAndStddev(X(0), Vector1(5.0), 0.5)); + + // Add prior on mode. + const size_t nrModes = manyModes ? num_measurements : 1; + for (size_t i = 0; i < nrModes; i++) { + bayesNet.emplace_back(new DiscreteConditional({M(i), 2}, "4/6")); + } + return bayesNet; +} + +/** + * Create a tiny two variable hybrid factor graph which represents a discrete + * mode and a continuous variable x0, given a number of measurements of the + * continuous variable x0. If no measurements are given, they are sampled from + * the generative Bayes net model HybridBayesNet::Example(num_measurements) + */ +inline HybridGaussianFactorGraph createHybridGaussianFactorGraph( + size_t num_measurements = 1, + boost::optional measurements = boost::none, + bool manyModes = false) { + auto bayesNet = createHybridBayesNet(num_measurements, manyModes); + if (measurements) { + // Use the measurements to create a hybrid factor graph. + return bayesNet.toFactorGraph(*measurements); + } else { + // Sample from the generative model to create a hybrid factor graph. 
+ return bayesNet.toFactorGraph(bayesNet.sample().continuous()); + } +} + +} // namespace tiny +} // namespace gtsam diff --git a/gtsam/hybrid/tests/testGaussianMixture.cpp b/gtsam/hybrid/tests/testGaussianMixture.cpp index 420e22315b..7a4439e211 100644 --- a/gtsam/hybrid/tests/testGaussianMixture.cpp +++ b/gtsam/hybrid/tests/testGaussianMixture.cpp @@ -20,6 +20,8 @@ #include #include +#include +#include #include #include @@ -28,63 +30,193 @@ // Include for test suite #include -using namespace std; using namespace gtsam; using noiseModel::Isotropic; using symbol_shorthand::M; using symbol_shorthand::X; +using symbol_shorthand::Z; + +// Common constants +static const Key modeKey = M(0); +static const DiscreteKey mode(modeKey, 2); +static const VectorValues vv{{Z(0), Vector1(4.9)}, {X(0), Vector1(5.0)}}; +static const DiscreteValues assignment0{{M(0), 0}}, assignment1{{M(0), 1}}; +static const HybridValues hv0{vv, assignment0}; +static const HybridValues hv1{vv, assignment1}; /* ************************************************************************* */ -/* Check construction of GaussianMixture P(x1 | x2, m1) as well as accessing a - * specific mode i.e. P(x1 | x2, m1=1). 
- */ -TEST(GaussianMixture, Equals) { - // create a conditional gaussian node - Matrix S1(2, 2); - S1(0, 0) = 1; - S1(1, 0) = 2; - S1(0, 1) = 3; - S1(1, 1) = 4; - - Matrix S2(2, 2); - S2(0, 0) = 6; - S2(1, 0) = 0.2; - S2(0, 1) = 8; - S2(1, 1) = 0.4; - - Matrix R1(2, 2); - R1(0, 0) = 0.1; - R1(1, 0) = 0.3; - R1(0, 1) = 0.0; - R1(1, 1) = 0.34; - - Matrix R2(2, 2); - R2(0, 0) = 0.1; - R2(1, 0) = 0.3; - R2(0, 1) = 0.0; - R2(1, 1) = 0.34; - - SharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector2(1.0, 0.34)); - - Vector2 d1(0.2, 0.5), d2(0.5, 0.2); - - auto conditional0 = boost::make_shared(X(1), d1, R1, - X(2), S1, model), - conditional1 = boost::make_shared(X(1), d2, R2, - X(2), S2, model); - - // Create decision tree - DiscreteKey m1(1, 2); - GaussianMixture::Conditionals conditionals( - {m1}, - vector{conditional0, conditional1}); - GaussianMixture mixtureFactor({X(1)}, {X(2)}, {m1}, conditionals); - - // Let's check that this worked: - DiscreteValues mode; - mode[m1.first] = 1; - auto actual = mixtureFactor(mode); - EXPECT(actual == conditional1); +namespace equal_constants { +// Create a simple GaussianMixture +const double commonSigma = 2.0; +const std::vector conditionals{ + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), + commonSigma), + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), + commonSigma)}; +const GaussianMixture mixture({Z(0)}, {X(0)}, {mode}, conditionals); +} // namespace equal_constants + +/* ************************************************************************* */ +/// Check that invariants hold +TEST(GaussianMixture, Invariants) { + using namespace equal_constants; + + // Check that the mixture normalization constant is the max of all constants + // which are all equal, in this case, hence: + const double K = mixture.logNormalizationConstant(); + EXPECT_DOUBLES_EQUAL(K, conditionals[0]->logNormalizationConstant(), 1e-8); + EXPECT_DOUBLES_EQUAL(K, 
conditionals[1]->logNormalizationConstant(), 1e-8); + + EXPECT(GaussianMixture::CheckInvariants(mixture, hv0)); + EXPECT(GaussianMixture::CheckInvariants(mixture, hv1)); +} + +/* ************************************************************************* */ +/// Check LogProbability. +TEST(GaussianMixture, LogProbability) { + using namespace equal_constants; + auto actual = mixture.logProbability(vv); + + // Check result. + std::vector discrete_keys = {mode}; + std::vector leaves = {conditionals[0]->logProbability(vv), + conditionals[1]->logProbability(vv)}; + AlgebraicDecisionTree expected(discrete_keys, leaves); + + EXPECT(assert_equal(expected, actual, 1e-6)); + + // Check for non-tree version. + for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + EXPECT_DOUBLES_EQUAL(conditionals[mode]->logProbability(vv), + mixture.logProbability(hv), 1e-8); + } +} + +/* ************************************************************************* */ +/// Check error. +TEST(GaussianMixture, Error) { + using namespace equal_constants; + auto actual = mixture.error(vv); + + // Check result. + std::vector discrete_keys = {mode}; + std::vector leaves = {conditionals[0]->error(vv), + conditionals[1]->error(vv)}; + AlgebraicDecisionTree expected(discrete_keys, leaves); + + EXPECT(assert_equal(expected, actual, 1e-6)); + + // Check for non-tree version. + for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv), mixture.error(hv), + 1e-8); + } +} + +/* ************************************************************************* */ +/// Check that the likelihood is proportional to the conditional density given +/// the measurements. +TEST(GaussianMixture, Likelihood) { + using namespace equal_constants; + + // Compute likelihood + auto likelihood = mixture.likelihood(vv); + + // Check that the mixture error and the likelihood error are the same. 
+ EXPECT_DOUBLES_EQUAL(mixture.error(hv0), likelihood->error(hv0), 1e-8); + EXPECT_DOUBLES_EQUAL(mixture.error(hv1), likelihood->error(hv1), 1e-8); + + // Check that likelihood error is as expected, i.e., just the errors of the + // individual likelihoods, in the `equal_constants` case. + std::vector discrete_keys = {mode}; + std::vector leaves = {conditionals[0]->likelihood(vv)->error(vv), + conditionals[1]->likelihood(vv)->error(vv)}; + AlgebraicDecisionTree expected(discrete_keys, leaves); + EXPECT(assert_equal(expected, likelihood->error(vv), 1e-6)); + + // Check that the ratio of probPrime to evaluate is the same for all modes. + std::vector ratio(2); + for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + ratio[mode] = std::exp(-likelihood->error(hv)) / mixture.evaluate(hv); + } + EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); +} + +/* ************************************************************************* */ +namespace mode_dependent_constants { +// Create a GaussianMixture with mode-dependent noise models. +// 0 is low-noise, 1 is high-noise. +const std::vector conditionals{ + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), + 0.5), + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), + 3.0)}; +const GaussianMixture mixture({Z(0)}, {X(0)}, {mode}, conditionals); +} // namespace mode_dependent_constants + +/* ************************************************************************* */ +// Create a test for continuousParents. 
+TEST(GaussianMixture, ContinuousParents) { + using namespace mode_dependent_constants; + const KeyVector continuousParentKeys = mixture.continuousParents(); + // Check that the continuous parent keys are correct: + EXPECT(continuousParentKeys.size() == 1); + EXPECT(continuousParentKeys[0] == X(0)); +} + +/* ************************************************************************* */ +/// Check that the likelihood is proportional to the conditional density given +/// the measurements. +TEST(GaussianMixture, Likelihood2) { + using namespace mode_dependent_constants; + + // Compute likelihood + auto likelihood = mixture.likelihood(vv); + + // Check that the mixture error and the likelihood error are as expected, + // this invariant is the same as the equal noise case: + EXPECT_DOUBLES_EQUAL(mixture.error(hv0), likelihood->error(hv0), 1e-8); + EXPECT_DOUBLES_EQUAL(mixture.error(hv1), likelihood->error(hv1), 1e-8); + + // Check the detailed JacobianFactor calculation for mode==1. + { + // We have a JacobianFactor + const auto gf1 = (*likelihood)(assignment1); + const auto jf1 = boost::dynamic_pointer_cast(gf1); + CHECK(jf1); + + // It has 2 rows, not 1! + CHECK(jf1->rows() == 2); + + // Check that the constant C1 is properly encoded in the JacobianFactor. + const double C1 = mixture.logNormalizationConstant() - + conditionals[1]->logNormalizationConstant(); + const double c1 = std::sqrt(2.0 * C1); + Vector expected_unwhitened(2); + expected_unwhitened << 4.9 - 5.0, -c1; + Vector actual_unwhitened = jf1->unweighted_error(vv); + EXPECT(assert_equal(expected_unwhitened, actual_unwhitened)); + + // Make sure the noise model does not touch it. 
+ Vector expected_whitened(2); + expected_whitened << (4.9 - 5.0) / 3.0, -c1; + Vector actual_whitened = jf1->error_vector(vv); + EXPECT(assert_equal(expected_whitened, actual_whitened)); + + // Check that the error is equal to the mixture error: + EXPECT_DOUBLES_EQUAL(mixture.error(hv1), jf1->error(hv1), 1e-8); + } + + // Check that the ratio of probPrime to evaluate is the same for all modes. + std::vector ratio(2); + for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + ratio[mode] = std::exp(-likelihood->error(hv)) / mixture.evaluate(hv); + } + EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp b/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp index cb9068c303..962d238a89 100644 --- a/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp +++ b/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp @@ -10,7 +10,7 @@ * -------------------------------------------------------------------------- */ /** - * @file GaussianMixtureFactor.cpp + * @file testGaussianMixtureFactor.cpp * @brief Unit tests for GaussianMixtureFactor * @author Varun Agrawal * @author Fan Jiang @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -79,7 +80,7 @@ TEST(GaussianMixtureFactor, Sum) { // Create sum of two mixture factors: it will be a decision tree now on both // discrete variables m1 and m2: - GaussianMixtureFactor::Sum sum; + GaussianFactorGraphTree sum; sum += mixtureFactorA; sum += mixtureFactorB; @@ -135,7 +136,7 @@ TEST(GaussianMixtureFactor, Printing) { EXPECT(assert_print_equal(expected, mixtureFactor)); } -TEST_UNSAFE(GaussianMixtureFactor, GaussianMixture) { +TEST(GaussianMixtureFactor, GaussianMixture) { KeyVector keys; keys.push_back(X(0)); keys.push_back(X(1)); @@ -151,6 +152,47 @@ TEST_UNSAFE(GaussianMixtureFactor, GaussianMixture) { EXPECT_LONGS_EQUAL(2, gm.discreteKeys().size()); } +/* 
************************************************************************* */ +// Test the error of the GaussianMixtureFactor +TEST(GaussianMixtureFactor, Error) { + DiscreteKey m1(1, 2); + + auto A01 = Matrix2::Identity(); + auto A02 = Matrix2::Identity(); + + auto A11 = Matrix2::Identity(); + auto A12 = Matrix2::Identity() * 2; + + auto b = Vector2::Zero(); + + auto f0 = boost::make_shared(X(1), A01, X(2), A02, b); + auto f1 = boost::make_shared(X(1), A11, X(2), A12, b); + std::vector factors{f0, f1}; + + GaussianMixtureFactor mixtureFactor({X(1), X(2)}, {m1}, factors); + + VectorValues continuousValues; + continuousValues.insert(X(1), Vector2(0, 0)); + continuousValues.insert(X(2), Vector2(1, 1)); + + // error should return a tree of errors, with nodes for each discrete value. + AlgebraicDecisionTree error_tree = mixtureFactor.error(continuousValues); + + std::vector discrete_keys = {m1}; + // Error values for regression test + std::vector errors = {1, 4}; + AlgebraicDecisionTree expected_error(discrete_keys, errors); + + EXPECT(assert_equal(expected_error, error_tree)); + + // Test for single leaf given discrete assignment P(X|M,Z). 
+ DiscreteValues discreteValues; + discreteValues[m1.first] = 1; + EXPECT_DOUBLES_EQUAL( + 4.0, mixtureFactor.error({continuousValues, discreteValues}), + 1e-9); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index e1fe724695..425e93415f 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -18,63 +18,118 @@ * @date December 2021 */ -#include #include #include #include #include "Switching.h" +#include "TinyHybridExample.h" // Include for test suite #include using namespace std; using namespace gtsam; -using namespace gtsam::serializationTestHelpers; using noiseModel::Isotropic; using symbol_shorthand::M; using symbol_shorthand::X; +using symbol_shorthand::Z; -static const DiscreteKey Asia(0, 2); +static const Key asiaKey = 0; +static const DiscreteKey Asia(asiaKey, 2); /* ****************************************************************************/ -// Test creation +// Test creation of a pure discrete Bayes net. TEST(HybridBayesNet, Creation) { HybridBayesNet bayesNet; - - bayesNet.add(Asia, "99/1"); + bayesNet.emplace_back(new DiscreteConditional(Asia, "99/1")); DiscreteConditional expected(Asia, "99/1"); - - CHECK(bayesNet.atDiscrete(0)); - auto& df = *bayesNet.atDiscrete(0); - EXPECT(df.equals(expected)); + CHECK(bayesNet.at(0)->asDiscrete()); + EXPECT(assert_equal(expected, *bayesNet.at(0)->asDiscrete())); } /* ****************************************************************************/ -// Test adding a bayes net to another one. +// Test adding a Bayes net to another one. 
TEST(HybridBayesNet, Add) { HybridBayesNet bayesNet; - - bayesNet.add(Asia, "99/1"); - - DiscreteConditional expected(Asia, "99/1"); + bayesNet.emplace_back(new DiscreteConditional(Asia, "99/1")); HybridBayesNet other; - other.push_back(bayesNet); + other.add(bayesNet); EXPECT(bayesNet.equals(other)); } +/* ****************************************************************************/ +// Test evaluate for a pure discrete Bayes net P(Asia). +TEST(HybridBayesNet, EvaluatePureDiscrete) { + HybridBayesNet bayesNet; + bayesNet.emplace_back(new DiscreteConditional(Asia, "4/6")); + HybridValues values; + values.insert(asiaKey, 0); + EXPECT_DOUBLES_EQUAL(0.4, bayesNet.evaluate(values), 1e-9); +} + +/* ****************************************************************************/ +// Test creation of a tiny hybrid Bayes net. +TEST(HybridBayesNet, Tiny) { + auto bn = tiny::createHybridBayesNet(); + EXPECT_LONGS_EQUAL(3, bn.size()); + + const VectorValues vv{{Z(0), Vector1(5.0)}, {X(0), Vector1(5.0)}}; + auto fg = bn.toFactorGraph(vv); + EXPECT_LONGS_EQUAL(3, fg.size()); + + // Check that the ratio of probPrime to evaluate is the same for all modes. + std::vector ratio(2); + for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + ratio[mode] = std::exp(-fg.error(hv)) / bn.evaluate(hv); + } + EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); +} + +/* ****************************************************************************/ +// Test evaluate for a hybrid Bayes net P(X0|X1) P(X1|Asia) P(Asia). 
+TEST(HybridBayesNet, evaluateHybrid) { + const auto continuousConditional = GaussianConditional::sharedMeanAndStddev( + X(0), 2 * I_1x1, X(1), Vector1(-4.0), 5.0); + + const SharedDiagonal model0 = noiseModel::Diagonal::Sigmas(Vector1(2.0)), + model1 = noiseModel::Diagonal::Sigmas(Vector1(3.0)); + + const auto conditional0 = boost::make_shared( + X(1), Vector1::Constant(5), I_1x1, model0), + conditional1 = boost::make_shared( + X(1), Vector1::Constant(2), I_1x1, model1); + + // Create hybrid Bayes net. + HybridBayesNet bayesNet; + bayesNet.push_back(continuousConditional); + bayesNet.emplace_back( + new GaussianMixture({X(1)}, {}, {Asia}, {conditional0, conditional1})); + bayesNet.emplace_back(new DiscreteConditional(Asia, "99/1")); + + // Create values at which to evaluate. + HybridValues values; + values.insert(asiaKey, 0); + values.insert(X(0), Vector1(-6)); + values.insert(X(1), Vector1(1)); + + const double conditionalProbability = + continuousConditional->evaluate(values.continuous()); + const double mixtureProbability = conditional0->evaluate(values.continuous()); + EXPECT_DOUBLES_EQUAL(conditionalProbability * mixtureProbability * 0.99, + bayesNet.evaluate(values), 1e-9); +} + /* ****************************************************************************/ // Test choosing an assignment of conditionals TEST(HybridBayesNet, Choose) { Switching s(4); - Ordering ordering; - for (auto&& kvp : s.linearizationPoint) { - ordering += kvp.key; - } + const Ordering ordering(s.linearizationPoint.keys()); HybridBayesNet::shared_ptr hybridBayesNet; HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; @@ -90,29 +145,22 @@ TEST(HybridBayesNet, Choose) { EXPECT_LONGS_EQUAL(4, gbn.size()); - EXPECT(assert_equal(*(*boost::dynamic_pointer_cast( - hybridBayesNet->atMixture(0)))(assignment), + EXPECT(assert_equal(*(*hybridBayesNet->at(0)->asMixture())(assignment), *gbn.at(0))); - EXPECT(assert_equal(*(*boost::dynamic_pointer_cast( - 
hybridBayesNet->atMixture(1)))(assignment), + EXPECT(assert_equal(*(*hybridBayesNet->at(1)->asMixture())(assignment), *gbn.at(1))); - EXPECT(assert_equal(*(*boost::dynamic_pointer_cast( - hybridBayesNet->atMixture(2)))(assignment), + EXPECT(assert_equal(*(*hybridBayesNet->at(2)->asMixture())(assignment), *gbn.at(2))); - EXPECT(assert_equal(*(*boost::dynamic_pointer_cast( - hybridBayesNet->atMixture(3)))(assignment), + EXPECT(assert_equal(*(*hybridBayesNet->at(3)->asMixture())(assignment), *gbn.at(3))); } /* ****************************************************************************/ -// Test bayes net optimize +// Test Bayes net optimize TEST(HybridBayesNet, OptimizeAssignment) { Switching s(4); - Ordering ordering; - for (auto&& kvp : s.linearizationPoint) { - ordering += kvp.key; - } + const Ordering ordering(s.linearizationPoint.keys()); HybridBayesNet::shared_ptr hybridBayesNet; HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; @@ -139,62 +187,90 @@ TEST(HybridBayesNet, OptimizeAssignment) { } /* ****************************************************************************/ -// Test bayes net optimize +// Test Bayes net optimize TEST(HybridBayesNet, Optimize) { - Switching s(4); + Switching s(4, 1.0, 0.1, {0, 1, 2, 3}, "1/1 1/1"); - Ordering hybridOrdering = s.linearizedFactorGraph.getHybridOrdering(); HybridBayesNet::shared_ptr hybridBayesNet = - s.linearizedFactorGraph.eliminateSequential(hybridOrdering); + s.linearizedFactorGraph.eliminateSequential(); HybridValues delta = hybridBayesNet->optimize(); + // NOTE: The true assignment is 111, but the discrete priors cause 101 DiscreteValues expectedAssignment; expectedAssignment[M(0)] = 1; - expectedAssignment[M(1)] = 0; + expectedAssignment[M(1)] = 1; expectedAssignment[M(2)] = 1; EXPECT(assert_equal(expectedAssignment, delta.discrete())); VectorValues expectedValues; - expectedValues.insert(X(0), -0.999904 * Vector1::Ones()); - expectedValues.insert(X(1), -0.99029 * Vector1::Ones()); - 
expectedValues.insert(X(2), -1.00971 * Vector1::Ones()); - expectedValues.insert(X(3), -1.0001 * Vector1::Ones()); + expectedValues.insert(X(0), -Vector1::Ones()); + expectedValues.insert(X(1), -Vector1::Ones()); + expectedValues.insert(X(2), -Vector1::Ones()); + expectedValues.insert(X(3), -Vector1::Ones()); EXPECT(assert_equal(expectedValues, delta.continuous(), 1e-5)); } /* ****************************************************************************/ -// Test bayes net multifrontal optimize -TEST(HybridBayesNet, OptimizeMultifrontal) { - Switching s(4); - - Ordering hybridOrdering = s.linearizedFactorGraph.getHybridOrdering(); - HybridBayesTree::shared_ptr hybridBayesTree = - s.linearizedFactorGraph.eliminateMultifrontal(hybridOrdering); - HybridValues delta = hybridBayesTree->optimize(); - - VectorValues expectedValues; - expectedValues.insert(X(0), -0.999904 * Vector1::Ones()); - expectedValues.insert(X(1), -0.99029 * Vector1::Ones()); - expectedValues.insert(X(2), -1.00971 * Vector1::Ones()); - expectedValues.insert(X(3), -1.0001 * Vector1::Ones()); - - EXPECT(assert_equal(expectedValues, delta.continuous(), 1e-5)); +// Test Bayes net error +TEST(HybridBayesNet, Pruning) { + Switching s(3); + + HybridBayesNet::shared_ptr posterior = + s.linearizedFactorGraph.eliminateSequential(); + EXPECT_LONGS_EQUAL(5, posterior->size()); + + HybridValues delta = posterior->optimize(); + auto actualTree = posterior->evaluate(delta.continuous()); + + // Regression test on density tree. 
+ std::vector discrete_keys = {{M(0), 2}, {M(1), 2}}; + std::vector leaves = {6.1112424, 20.346113, 17.785849, 19.738098}; + AlgebraicDecisionTree expected(discrete_keys, leaves); + EXPECT(assert_equal(expected, actualTree, 1e-6)); + + // Prune and get probabilities + auto prunedBayesNet = posterior->prune(2); + auto prunedTree = prunedBayesNet.evaluate(delta.continuous()); + + // Regression test on pruned logProbability tree + std::vector pruned_leaves = {0.0, 20.346113, 0.0, 19.738098}; + AlgebraicDecisionTree expected_pruned(discrete_keys, pruned_leaves); + EXPECT(assert_equal(expected_pruned, prunedTree, 1e-6)); + + // Verify logProbability computation and check specific logProbability value + const DiscreteValues discrete_values{{M(0), 1}, {M(1), 1}}; + const HybridValues hybridValues{delta.continuous(), discrete_values}; + double logProbability = 0; + logProbability += posterior->at(0)->asMixture()->logProbability(hybridValues); + logProbability += posterior->at(1)->asMixture()->logProbability(hybridValues); + logProbability += posterior->at(2)->asMixture()->logProbability(hybridValues); + // NOTE(dellaert): the discrete errors were not added in logProbability tree! 
+ logProbability += + posterior->at(3)->asDiscrete()->logProbability(hybridValues); + logProbability += + posterior->at(4)->asDiscrete()->logProbability(hybridValues); + + double density = exp(logProbability); + EXPECT_DOUBLES_EQUAL(density, actualTree(discrete_values), 1e-9); + EXPECT_DOUBLES_EQUAL(density, prunedTree(discrete_values), 1e-9); + EXPECT_DOUBLES_EQUAL(logProbability, posterior->logProbability(hybridValues), + 1e-9); } /* ****************************************************************************/ -// Test bayes net pruning +// Test Bayes net pruning TEST(HybridBayesNet, Prune) { Switching s(4); - Ordering hybridOrdering = s.linearizedFactorGraph.getHybridOrdering(); - HybridBayesNet::shared_ptr hybridBayesNet = - s.linearizedFactorGraph.eliminateSequential(hybridOrdering); + HybridBayesNet::shared_ptr posterior = + s.linearizedFactorGraph.eliminateSequential(); + EXPECT_LONGS_EQUAL(7, posterior->size()); - HybridValues delta = hybridBayesNet->optimize(); + HybridValues delta = posterior->optimize(); - auto prunedBayesNet = hybridBayesNet->prune(2); + auto prunedBayesNet = posterior->prune(2); HybridValues pruned_delta = prunedBayesNet.optimize(); EXPECT(assert_equal(delta.discrete(), pruned_delta.discrete())); @@ -202,16 +278,16 @@ TEST(HybridBayesNet, Prune) { } /* ****************************************************************************/ -// Test bayes net updateDiscreteConditionals +// Test Bayes net updateDiscreteConditionals TEST(HybridBayesNet, UpdateDiscreteConditionals) { Switching s(4); - Ordering hybridOrdering = s.linearizedFactorGraph.getHybridOrdering(); - HybridBayesNet::shared_ptr hybridBayesNet = - s.linearizedFactorGraph.eliminateSequential(hybridOrdering); + HybridBayesNet::shared_ptr posterior = + s.linearizedFactorGraph.eliminateSequential(); + EXPECT_LONGS_EQUAL(7, posterior->size()); size_t maxNrLeaves = 3; - auto discreteConditionals = hybridBayesNet->discreteConditionals(); + auto discreteConditionals = 
posterior->discreteConditionals(); const DecisionTreeFactor::shared_ptr prunedDecisionTree = boost::make_shared( discreteConditionals->prune(maxNrLeaves)); @@ -219,11 +295,10 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) { EXPECT_LONGS_EQUAL(maxNrLeaves + 2 /*2 zero leaves*/, prunedDecisionTree->nrLeaves()); - auto original_discrete_conditionals = - *(hybridBayesNet->at(4)->asDiscreteConditional()); + auto original_discrete_conditionals = *(posterior->at(4)->asDiscrete()); // Prune! - hybridBayesNet->prune(maxNrLeaves); + posterior->prune(maxNrLeaves); // Functor to verify values against the original_discrete_conditionals auto checker = [&](const Assignment& assignment, @@ -240,8 +315,7 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) { }; // Get the pruned discrete conditionals as an AlgebraicDecisionTree - auto pruned_discrete_conditionals = - hybridBayesNet->at(4)->asDiscreteConditional(); + auto pruned_discrete_conditionals = posterior->at(4)->asDiscrete(); auto discrete_conditional_tree = boost::dynamic_pointer_cast( pruned_discrete_conditionals); @@ -251,15 +325,70 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) { } /* ****************************************************************************/ -// Test HybridBayesNet serialization. -TEST(HybridBayesNet, Serialization) { - Switching s(4); - Ordering ordering = s.linearizedFactorGraph.getHybridOrdering(); - HybridBayesNet hbn = *(s.linearizedFactorGraph.eliminateSequential(ordering)); +// Test HybridBayesNet sampling. 
+TEST(HybridBayesNet, Sampling) { + HybridNonlinearFactorGraph nfg; + + auto noise_model = noiseModel::Diagonal::Sigmas(Vector1(1.0)); + auto zero_motion = + boost::make_shared>(X(0), X(1), 0, noise_model); + auto one_motion = + boost::make_shared>(X(0), X(1), 1, noise_model); + std::vector factors = {zero_motion, one_motion}; + nfg.emplace_shared>(X(0), 0.0, noise_model); + nfg.emplace_shared( + KeyVector{X(0), X(1)}, DiscreteKeys{DiscreteKey(M(0), 2)}, factors); + + DiscreteKey mode(M(0), 2); + nfg.emplace_shared(mode, "1/1"); + + Values initial; + double z0 = 0.0, z1 = 1.0; + initial.insert(X(0), z0); + initial.insert(X(1), z1); + + // Create the factor graph from the nonlinear factor graph. + HybridGaussianFactorGraph::shared_ptr fg = nfg.linearize(initial); + // Eliminate into BN + HybridBayesNet::shared_ptr bn = fg->eliminateSequential(); + + // Set up sampling + std::mt19937_64 gen(11); + + // Initialize containers for computing the mean values. + vector discrete_samples; + VectorValues average_continuous; + + size_t num_samples = 1000; + for (size_t i = 0; i < num_samples; i++) { + // Sample + HybridValues sample = bn->sample(&gen); + + discrete_samples.push_back(sample.discrete().at(M(0))); + + if (i == 0) { + average_continuous.insert(sample.continuous()); + } else { + average_continuous += sample.continuous(); + } + } - EXPECT(equalsObj(hbn)); - EXPECT(equalsXML(hbn)); - EXPECT(equalsBinary(hbn)); + EXPECT_LONGS_EQUAL(2, average_continuous.size()); + EXPECT_LONGS_EQUAL(num_samples, discrete_samples.size()); + + // Regressions don't work across platforms :-( + // // regression for specific RNG seed + // double discrete_sum = + // std::accumulate(discrete_samples.begin(), discrete_samples.end(), + // decltype(discrete_samples)::value_type(0)); + // EXPECT_DOUBLES_EQUAL(0.477, discrete_sum / num_samples, 1e-9); + + // VectorValues expected; + // expected.insert({X(0), Vector1(-0.0131207162712)}); + // expected.insert({X(1), Vector1(-0.499026377568)}); + // 
// regression for specific RNG seed + // EXPECT(assert_equal(expected, average_continuous.scale(1.0 / + // num_samples))); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testHybridBayesTree.cpp b/gtsam/hybrid/tests/testHybridBayesTree.cpp index 876c550cb0..2c2bdc3c05 100644 --- a/gtsam/hybrid/tests/testHybridBayesTree.cpp +++ b/gtsam/hybrid/tests/testHybridBayesTree.cpp @@ -32,6 +32,24 @@ using noiseModel::Isotropic; using symbol_shorthand::M; using symbol_shorthand::X; +/* ****************************************************************************/ +// Test multifrontal optimize +TEST(HybridBayesTree, OptimizeMultifrontal) { + Switching s(4); + + HybridBayesTree::shared_ptr hybridBayesTree = + s.linearizedFactorGraph.eliminateMultifrontal(); + HybridValues delta = hybridBayesTree->optimize(); + + VectorValues expectedValues; + expectedValues.insert(X(0), -0.999904 * Vector1::Ones()); + expectedValues.insert(X(1), -0.99029 * Vector1::Ones()); + expectedValues.insert(X(2), -1.00971 * Vector1::Ones()); + expectedValues.insert(X(3), -1.0001 * Vector1::Ones()); + + EXPECT(assert_equal(expectedValues, delta.continuous(), 1e-5)); +} + /* ****************************************************************************/ // Test for optimizing a HybridBayesTree with a given assignment. 
TEST(HybridBayesTree, OptimizeAssignment) { @@ -105,7 +123,7 @@ TEST(HybridBayesTree, Optimize) { graph1.push_back(s.linearizedFactorGraph.at(i)); } - // Add the Gaussian factors, 1 prior on X(1), + // Add the Gaussian factors, 1 prior on X(0), // 3 measurements on X(2), X(3), X(4) graph1.push_back(s.linearizedFactorGraph.at(0)); for (size_t i = 4; i <= 6; i++) { @@ -132,11 +150,17 @@ TEST(HybridBayesTree, Optimize) { DiscreteFactorGraph dfg; for (auto&& f : *remainingFactorGraph) { - auto factor = dynamic_pointer_cast(f); - dfg.push_back( - boost::dynamic_pointer_cast(factor->inner())); + auto discreteFactor = dynamic_pointer_cast(f); + assert(discreteFactor); + dfg.push_back(discreteFactor); } + // Add the probabilities for each branch + DiscreteKeys discrete_keys = {{M(0), 2}, {M(1), 2}, {M(2), 2}}; + vector probs = {0.012519475, 0.041280228, 0.075018647, 0.081663656, + 0.037152205, 0.12248971, 0.07349729, 0.08}; + dfg.emplace_shared(discrete_keys, probs); + DiscreteValues expectedMPE = dfg.optimize(); VectorValues expectedValues = hybridBayesNet->optimize(expectedMPE); @@ -145,17 +169,46 @@ TEST(HybridBayesTree, Optimize) { } /* ****************************************************************************/ -// Test HybridBayesTree serialization. -TEST(HybridBayesTree, Serialization) { +// Test for choosing a GaussianBayesTree from a HybridBayesTree. 
+TEST(HybridBayesTree, Choose) { Switching s(4); - Ordering ordering = s.linearizedFactorGraph.getHybridOrdering(); - HybridBayesTree hbt = - *(s.linearizedFactorGraph.eliminateMultifrontal(ordering)); - - using namespace gtsam::serializationTestHelpers; - EXPECT(equalsObj(hbt)); - EXPECT(equalsXML(hbt)); - EXPECT(equalsBinary(hbt)); + + HybridGaussianISAM isam; + HybridGaussianFactorGraph graph1; + + // Add the 3 hybrid factors, x1-x2, x2-x3, x3-x4 + for (size_t i = 1; i < 4; i++) { + graph1.push_back(s.linearizedFactorGraph.at(i)); + } + + // Add the Gaussian factors, 1 prior on X(0), + // 3 measurements on X(2), X(3), X(4) + graph1.push_back(s.linearizedFactorGraph.at(0)); + for (size_t i = 4; i <= 6; i++) { + graph1.push_back(s.linearizedFactorGraph.at(i)); + } + + // Add the discrete factors + for (size_t i = 7; i <= 9; i++) { + graph1.push_back(s.linearizedFactorGraph.at(i)); + } + + isam.update(graph1); + + DiscreteValues assignment; + assignment[M(0)] = 1; + assignment[M(1)] = 1; + assignment[M(2)] = 1; + + GaussianBayesTree gbt = isam.choose(assignment); + + // Specify ordering so it matches that of HybridGaussianISAM. + Ordering ordering(KeyVector{X(0), X(1), X(2), X(3), M(0), M(1), M(2)}); + auto bayesTree = s.linearizedFactorGraph.eliminateMultifrontal(ordering); + + auto expected_gbt = bayesTree->choose(assignment); + + EXPECT(assert_equal(expected_gbt, gbt)); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testHybridConditional.cpp b/gtsam/hybrid/tests/testHybridConditional.cpp new file mode 100644 index 0000000000..406306df7c --- /dev/null +++ b/gtsam/hybrid/tests/testHybridConditional.cpp @@ -0,0 +1,83 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testHybridConditional.cpp + * @brief Unit tests for HybridConditional class + * @date January 2023 + */ + +#include + +#include "TinyHybridExample.h" + +// Include for test suite +#include + +using namespace gtsam; + +using symbol_shorthand::M; +using symbol_shorthand::X; +using symbol_shorthand::Z; + +/* ****************************************************************************/ +// Check invariants for all conditionals in a tiny Bayes net. +TEST(HybridConditional, Invariants) { + // Create hybrid Bayes net p(z|x,m)p(x)P(m) + auto bn = tiny::createHybridBayesNet(); + + // Create values to check invariants. + const VectorValues c{{X(0), Vector1(5.1)}, {Z(0), Vector1(4.9)}}; + const DiscreteValues d{{M(0), 1}}; + const HybridValues values{c, d}; + + // Check invariants for p(z|x,m) + auto hc0 = bn.at(0); + CHECK(hc0->isHybrid()); + + // Check invariants as a GaussianMixture. + const auto mixture = hc0->asMixture(); + EXPECT(GaussianMixture::CheckInvariants(*mixture, values)); + + // Check invariants as a HybridConditional. + EXPECT(HybridConditional::CheckInvariants(*hc0, values)); + + // Check invariants for p(x) + auto hc1 = bn.at(1); + CHECK(hc1->isContinuous()); + + // Check invariants as a GaussianConditional. + const auto gaussian = hc1->asGaussian(); + EXPECT(GaussianConditional::CheckInvariants(*gaussian, c)); + EXPECT(GaussianConditional::CheckInvariants(*gaussian, values)); + + // Check invariants as a HybridConditional. + EXPECT(HybridConditional::CheckInvariants(*hc1, values)); + + // Check invariants for p(m) + auto hc2 = bn.at(2); + CHECK(hc2->isDiscrete()); + + // Check invariants as a DiscreteConditional. 
+ const auto discrete = hc2->asDiscrete(); + EXPECT(DiscreteConditional::CheckInvariants(*discrete, d)); + EXPECT(DiscreteConditional::CheckInvariants(*discrete, values)); + + // Check invariants as a HybridConditional. + EXPECT(HybridConditional::CheckInvariants(*hc2, values)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index 6be7566ae0..a0195d0c56 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -15,6 +15,7 @@ * @author Varun Agrawal */ +#include #include #include #include @@ -23,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -44,7 +46,7 @@ Ordering getOrdering(HybridGaussianFactorGraph& factors, const HybridGaussianFactorGraph& newFactors) { factors += newFactors; // Get all the discrete keys from the factors - KeySet allDiscrete = factors.discreteKeys(); + KeySet allDiscrete = factors.discreteKeySet(); // Create KeyVector with continuous keys followed by discrete keys. KeyVector newKeysDiscreteLast; @@ -69,15 +71,59 @@ Ordering getOrdering(HybridGaussianFactorGraph& factors, return ordering; } +TEST(HybridEstimation, Full) { + size_t K = 6; + std::vector measurements = {0, 1, 2, 2, 2, 3}; + // Ground truth discrete seq + std::vector discrete_seq = {1, 1, 0, 0, 1}; + // Switching example of robot moving in 1D + // with given measurements and equal mode priors. 
+ Switching switching(K, 1.0, 0.1, measurements, "1/1 1/1"); + HybridGaussianFactorGraph graph = switching.linearizedFactorGraph; + + Ordering hybridOrdering; + for (size_t k = 0; k < K; k++) { + hybridOrdering += X(k); + } + for (size_t k = 0; k < K - 1; k++) { + hybridOrdering += M(k); + } + + HybridBayesNet::shared_ptr bayesNet = + graph.eliminateSequential(); + + EXPECT_LONGS_EQUAL(2 * K - 1, bayesNet->size()); + + HybridValues delta = bayesNet->optimize(); + + Values initial = switching.linearizationPoint; + Values result = initial.retract(delta.continuous()); + + DiscreteValues expected_discrete; + for (size_t k = 0; k < K - 1; k++) { + expected_discrete[M(k)] = discrete_seq[k]; + } + EXPECT(assert_equal(expected_discrete, delta.discrete())); + + Values expected_continuous; + for (size_t k = 0; k < K; k++) { + expected_continuous.insert(X(k), measurements[k]); + } + EXPECT(assert_equal(expected_continuous, result)); +} + /****************************************************************************/ // Test approximate inference with an additional pruning step. -TEST(HybridNonlinearISAM, Incremental) { - size_t K = 10; - std::vector measurements = {0, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6}; +TEST(HybridEstimation, Incremental) { + size_t K = 15; + std::vector measurements = {0, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, + 7, 8, 9, 9, 9, 10, 11, 11, 11, 11}; // Ground truth discrete seq - std::vector discrete_seq = {1, 1, 0, 0, 0, 1, 1, 1, 1, 0}; - Switching switching(K, 1.0, 0.1, measurements); - // HybridNonlinearISAM smoother; + std::vector discrete_seq = {1, 1, 0, 0, 0, 1, 1, 1, 1, 0, + 1, 1, 1, 0, 0, 1, 1, 0, 0, 0}; + // Switching example of robot moving in 1D + // with given measurements and equal mode priors. 
+ Switching switching(K, 1.0, 0.1, measurements, "1/1 1/1"); HybridSmoother smoother; HybridNonlinearFactorGraph graph; Values initial; @@ -90,7 +136,6 @@ TEST(HybridNonlinearISAM, Incremental) { HybridGaussianFactorGraph bayesNet; for (size_t k = 1; k < K; k++) { - std::cout << ">>>>>>>>>>>>>>>>>>> k=" << k << std::endl; // Motion Model graph.push_back(switching.nonlinearFactorGraph.at(k)); // Measurement @@ -105,6 +150,7 @@ TEST(HybridNonlinearISAM, Incremental) { smoother.update(linearized, ordering, 3); graph.resize(0); } + HybridValues delta = smoother.hybridBayesNet().optimize(); Values result = initial.retract(delta.continuous()); @@ -122,6 +168,347 @@ TEST(HybridNonlinearISAM, Incremental) { EXPECT(assert_equal(expected_continuous, result)); } +/** + * @brief A function to get a specific 1D robot motion problem as a linearized + * factor graph. This is the problem P(X|Z, M), i.e. estimating the continuous + * positions given the measurements and discrete sequence. + * + * @param K The number of timesteps. + * @param measurements The vector of measurements for each timestep. + * @param discrete_seq The discrete sequence governing the motion of the robot. + * @param measurement_sigma Noise model sigma for measurements. + * @param between_sigma Noise model sigma for the between factor. + * @return GaussianFactorGraph::shared_ptr + */ +GaussianFactorGraph::shared_ptr specificModesFactorGraph( + size_t K, const std::vector& measurements, + const std::vector& discrete_seq, double measurement_sigma = 0.1, + double between_sigma = 1.0) { + NonlinearFactorGraph graph; + Values linearizationPoint; + + // Add measurement factors + auto measurement_noise = noiseModel::Isotropic::Sigma(1, measurement_sigma); + for (size_t k = 0; k < K; k++) { + graph.emplace_shared>(X(k), measurements.at(k), + measurement_noise); + linearizationPoint.insert(X(k), static_cast(k + 1)); + } + + using MotionModel = BetweenFactor; + + // Add "motion models". 
+ auto motion_noise_model = noiseModel::Isotropic::Sigma(1, between_sigma); + for (size_t k = 0; k < K - 1; k++) { + auto motion_model = boost::make_shared( + X(k), X(k + 1), discrete_seq.at(k), motion_noise_model); + graph.push_back(motion_model); + } + GaussianFactorGraph::shared_ptr linear_graph = + graph.linearize(linearizationPoint); + return linear_graph; +} + +/** + * @brief Get the discrete sequence from the integer `x`. + * + * @tparam K Template parameter so we can set the correct bitset size. + * @param x The integer to convert to a discrete binary sequence. + * @return std::vector + */ +template +std::vector getDiscreteSequence(size_t x) { + std::bitset seq = x; + std::vector discrete_seq(K - 1); + for (size_t i = 0; i < K - 1; i++) { + // Save to discrete vector in reverse order + discrete_seq[K - 2 - i] = seq[i]; + } + return discrete_seq; +} + +/** + * @brief Helper method to get the tree of + * unnormalized probabilities as per the elimination scheme. + * + * Used as a helper to compute q(\mu | M, Z) which is used by + * both P(X | M, Z) and P(M | Z). + * + * @param graph The HybridGaussianFactorGraph to eliminate. 
+ * @return AlgebraicDecisionTree + */ +AlgebraicDecisionTree getProbPrimeTree( + const HybridGaussianFactorGraph& graph) { + HybridBayesNet::shared_ptr bayesNet; + HybridGaussianFactorGraph::shared_ptr remainingGraph; + Ordering continuous(graph.continuousKeySet()); + std::tie(bayesNet, remainingGraph) = + graph.eliminatePartialSequential(continuous); + + auto last_conditional = bayesNet->at(bayesNet->size() - 1); + DiscreteKeys discrete_keys = last_conditional->discreteKeys(); + + const std::vector assignments = + DiscreteValues::CartesianProduct(discrete_keys); + + std::reverse(discrete_keys.begin(), discrete_keys.end()); + + vector vector_values; + for (const DiscreteValues& assignment : assignments) { + VectorValues values = bayesNet->optimize(assignment); + vector_values.push_back(boost::make_shared(values)); + } + DecisionTree delta_tree(discrete_keys, + vector_values); + + // Get the probPrime tree with the correct leaf probabilities + std::vector probPrimes; + for (const DiscreteValues& assignment : assignments) { + VectorValues delta = *delta_tree(assignment); + + // If VectorValues is empty, it means this is a pruned branch. + // Set the probPrime to 0.0. + if (delta.size() == 0) { + probPrimes.push_back(0.0); + continue; + } + + double error = graph.error({delta, assignment}); + probPrimes.push_back(exp(-error)); + } + AlgebraicDecisionTree probPrimeTree(discrete_keys, probPrimes); + return probPrimeTree; +} + +/********************************************************************************* + * Test for correctness of different branches of the P'(Continuous | Discrete). + * The values should match those of P'(Continuous) for each discrete mode. 
+ ********************************************************************************/ +TEST(HybridEstimation, Probability) { + constexpr size_t K = 4; + std::vector measurements = {0, 1, 2, 2}; + double between_sigma = 1.0, measurement_sigma = 0.1; + + // Switching example of robot moving in 1D with + // given measurements and equal mode priors. + Switching switching(K, between_sigma, measurement_sigma, measurements, + "1/1 1/1"); + auto graph = switching.linearizedFactorGraph; + + // Continuous elimination + Ordering continuous_ordering(graph.continuousKeySet()); + HybridBayesNet::shared_ptr bayesNet; + HybridGaussianFactorGraph::shared_ptr discreteGraph; + std::tie(bayesNet, discreteGraph) = + graph.eliminatePartialSequential(continuous_ordering); + + // Discrete elimination + Ordering discrete_ordering(graph.discreteKeySet()); + auto discreteBayesNet = discreteGraph->eliminateSequential(discrete_ordering); + + // Add the discrete conditionals to make it a full bayes net. + for (auto discrete_conditional : *discreteBayesNet) { + bayesNet->add(discrete_conditional); + } + auto discreteConditional = discreteBayesNet->at(0)->asDiscrete(); + + HybridValues hybrid_values = bayesNet->optimize(); + + // This is the correct sequence as designed + DiscreteValues discrete_seq; + discrete_seq[M(0)] = 1; + discrete_seq[M(1)] = 1; + discrete_seq[M(2)] = 0; + + EXPECT(assert_equal(discrete_seq, hybrid_values.discrete())); +} + +/****************************************************************************/ +/** + * Test for correctness of different branches of the P'(Continuous | Discrete) + * in the multi-frontal setting. The values should match those of P'(Continuous) + * for each discrete mode. + */ +TEST(HybridEstimation, ProbabilityMultifrontal) { + constexpr size_t K = 4; + std::vector measurements = {0, 1, 2, 2}; + + double between_sigma = 1.0, measurement_sigma = 0.1; + + // Switching example of robot moving in 1D with given measurements and equal + // mode priors. 
+ Switching switching(K, between_sigma, measurement_sigma, measurements, + "1/1 1/1"); + auto graph = switching.linearizedFactorGraph; + Ordering ordering = getOrdering(graph, HybridGaussianFactorGraph()); + + // Get the tree of unnormalized probabilities for each mode sequence. + AlgebraicDecisionTree expected_probPrimeTree = getProbPrimeTree(graph); + + // Eliminate continuous + Ordering continuous_ordering(graph.continuousKeySet()); + HybridBayesTree::shared_ptr bayesTree; + HybridGaussianFactorGraph::shared_ptr discreteGraph; + std::tie(bayesTree, discreteGraph) = + graph.eliminatePartialMultifrontal(continuous_ordering); + + // Get the last continuous conditional which will have all the discrete keys + Key last_continuous_key = + continuous_ordering.at(continuous_ordering.size() - 1); + auto last_conditional = (*bayesTree)[last_continuous_key]->conditional(); + DiscreteKeys discrete_keys = last_conditional->discreteKeys(); + + Ordering discrete(graph.discreteKeySet()); + auto discreteBayesTree = discreteGraph->eliminateMultifrontal(discrete); + + EXPECT_LONGS_EQUAL(1, discreteBayesTree->size()); + // DiscreteBayesTree should have only 1 clique + auto discrete_clique = (*discreteBayesTree)[discrete.at(0)]; + + std::set clique_set; + for (auto node : bayesTree->nodes()) { + clique_set.insert(node.second); + } + + // Set the root of the bayes tree as the discrete clique + for (auto clique : clique_set) { + if (clique->conditional()->parents() == + discrete_clique->conditional()->frontals()) { + discreteBayesTree->addClique(clique, discrete_clique); + + } else { + // Remove the clique from the children of the parents since + // it will get added again in addClique. 
+ auto clique_it = std::find(clique->parent()->children.begin(), + clique->parent()->children.end(), clique); + clique->parent()->children.erase(clique_it); + discreteBayesTree->addClique(clique, clique->parent()); + } + } + + HybridValues hybrid_values = discreteBayesTree->optimize(); + + // This is the correct sequence as designed + DiscreteValues discrete_seq; + discrete_seq[M(0)] = 1; + discrete_seq[M(1)] = 1; + discrete_seq[M(2)] = 0; + + EXPECT(assert_equal(discrete_seq, hybrid_values.discrete())); +} + +/********************************************************************************* + // Create a hybrid nonlinear factor graph f(x0, x1, m0; z0, z1) + ********************************************************************************/ +static HybridNonlinearFactorGraph createHybridNonlinearFactorGraph() { + HybridNonlinearFactorGraph nfg; + + constexpr double sigma = 0.5; // measurement noise + const auto noise_model = noiseModel::Isotropic::Sigma(1, sigma); + + // Add "measurement" factors: + nfg.emplace_shared>(X(0), 0.0, noise_model); + nfg.emplace_shared>(X(1), 1.0, noise_model); + + // Add mixture factor: + DiscreteKey m(M(0), 2); + const auto zero_motion = + boost::make_shared>(X(0), X(1), 0, noise_model); + const auto one_motion = + boost::make_shared>(X(0), X(1), 1, noise_model); + nfg.emplace_shared( + KeyVector{X(0), X(1)}, DiscreteKeys{m}, + std::vector{zero_motion, one_motion}); + + return nfg; +} + +/********************************************************************************* + // Create a hybrid linear factor graph f(x0, x1, m0; z0, z1) + ********************************************************************************/ +static HybridGaussianFactorGraph::shared_ptr createHybridGaussianFactorGraph() { + HybridNonlinearFactorGraph nfg = createHybridNonlinearFactorGraph(); + + Values initial; + double z0 = 0.0, z1 = 1.0; + initial.insert(X(0), z0); + initial.insert(X(1), z1); + return nfg.linearize(initial); +} + 
+/********************************************************************************* + * Do hybrid elimination and do regression test on discrete conditional. + ********************************************************************************/ +TEST(HybridEstimation, eliminateSequentialRegression) { + // Create the factor graph from the nonlinear factor graph. + HybridGaussianFactorGraph::shared_ptr fg = createHybridGaussianFactorGraph(); + + // Create expected discrete conditional on m0. + DiscreteKey m(M(0), 2); + DiscreteConditional expected(m % "0.51341712/1"); // regression + + // Eliminate into BN using one ordering + Ordering ordering1; + ordering1 += X(0), X(1), M(0); + HybridBayesNet::shared_ptr bn1 = fg->eliminateSequential(ordering1); + + // Check that the discrete conditional matches the expected. + auto dc1 = bn1->back()->asDiscrete(); + EXPECT(assert_equal(expected, *dc1, 1e-9)); + + // Eliminate into BN using a different ordering + Ordering ordering2; + ordering2 += X(0), X(1), M(0); + HybridBayesNet::shared_ptr bn2 = fg->eliminateSequential(ordering2); + + // Check that the discrete conditional matches the expected. + auto dc2 = bn2->back()->asDiscrete(); + EXPECT(assert_equal(expected, *dc2, 1e-9)); +} + +/********************************************************************************* + * Test for correctness via sampling. + * + * Compute the conditional P(x0, m0, x1| z0, z1) + * with measurements z0, z1. To do so, we: + * 1. Start with the corresponding Factor Graph `FG`. + * 2. Eliminate the factor graph into a Bayes Net `BN`. + * 3. Sample from the Bayes Net. + * 4. Check that the ratio `BN(x)/FG(x) = constant` for all samples `x`. + ********************************************************************************/ +TEST(HybridEstimation, CorrectnessViaSampling) { + // 1. Create the factor graph from the nonlinear factor graph. + const auto fg = createHybridGaussianFactorGraph(); + + // 2. 
Eliminate into BN + const HybridBayesNet::shared_ptr bn = fg->eliminateSequential(); + + // Set up sampling + std::mt19937_64 rng(11); + + // Compute the log-ratio between the Bayes net and the factor graph. + auto compute_ratio = [&](const HybridValues& sample) -> double { + return bn->evaluate(sample) / fg->probPrime(sample); + }; + + // The error evaluated by the factor graph and the Bayes net should differ by + // the normalizing term computed via the Bayes net determinant. + const HybridValues sample = bn->sample(&rng); + double expected_ratio = compute_ratio(sample); + // regression + EXPECT_DOUBLES_EQUAL(0.728588, expected_ratio, 1e-6); + + // 3. Do sampling + constexpr int num_samples = 10; + for (size_t i = 0; i < num_samples; i++) { + // Sample from the bayes net + const HybridValues sample = bn->sample(&rng); + + // 4. Check that the ratio is constant. + EXPECT_DOUBLES_EQUAL(expected_ratio, compute_ratio(sample), 1e-6); + } +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index ed6b97ab04..e5c11bf0c2 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -26,9 +26,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -40,7 +38,6 @@ #include #include -#include #include #include #include @@ -48,16 +45,20 @@ #include #include "Switching.h" - -using namespace boost::assign; +#include "TinyHybridExample.h" using namespace std; using namespace gtsam; using gtsam::symbol_shorthand::D; using gtsam::symbol_shorthand::M; +using gtsam::symbol_shorthand::N; using gtsam::symbol_shorthand::X; using gtsam::symbol_shorthand::Y; +using gtsam::symbol_shorthand::Z; + +// Set up sampling +std::mt19937_64 kRng(42); /* 
************************************************************************* */ TEST(HybridGaussianFactorGraph, Creation) { @@ -65,7 +66,7 @@ TEST(HybridGaussianFactorGraph, Creation) { HybridGaussianFactorGraph hfg; - hfg.add(HybridGaussianFactor(JacobianFactor(X(0), I_3x3, Z_3x1))); + hfg.emplace_shared(X(0), I_3x3, Z_3x1); // Define a gaussian mixture conditional P(x0|x1, c0) and add it to the factor // graph @@ -86,7 +87,7 @@ TEST(HybridGaussianFactorGraph, EliminateSequential) { // Test elimination of a single variable. HybridGaussianFactorGraph hfg; - hfg.add(HybridGaussianFactor(JacobianFactor(0, I_3x3, Z_3x1))); + hfg.emplace_shared(0, I_3x3, Z_3x1); auto result = hfg.eliminatePartialSequential(KeyVector{0}); @@ -102,7 +103,7 @@ TEST(HybridGaussianFactorGraph, EliminateMultifrontal) { // Add priors on x0 and c1 hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); - hfg.add(HybridDiscreteFactor(DecisionTreeFactor(m, {2, 8}))); + hfg.add(DecisionTreeFactor(m, {2, 8})); Ordering ordering; ordering.push_back(X(0)); @@ -116,27 +117,27 @@ TEST(HybridGaussianFactorGraph, EliminateMultifrontal) { TEST(HybridGaussianFactorGraph, eliminateFullSequentialEqualChance) { HybridGaussianFactorGraph hfg; - DiscreteKey m1(M(1), 2); - // Add prior on x0 hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); + // Add factor between x0 and x1 hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); // Add a gaussian mixture factor Ï•(x1, c1) + DiscreteKey m1(M(1), 2); DecisionTree dt( M(1), boost::make_shared(X(1), I_3x3, Z_3x1), boost::make_shared(X(1), I_3x3, Vector3::Ones())); - hfg.add(GaussianMixtureFactor({X(1)}, {m1}, dt)); - auto result = - hfg.eliminateSequential(Ordering::ColamdConstrainedLast(hfg, {M(1)})); + auto result = hfg.eliminateSequential(); - auto dc = result->at(2)->asDiscreteConditional(); + auto dc = result->at(2)->asDiscrete(); + CHECK(dc); DiscreteValues dv; dv[M(1)] = 0; - EXPECT_DOUBLES_EQUAL(1, dc->operator()(dv), 1e-3); + // Regression test + 
EXPECT_DOUBLES_EQUAL(0.62245933120185448, dc->operator()(dv), 1e-3); } /* ************************************************************************* */ @@ -160,8 +161,7 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { // Joint discrete probability table for c1, c2 hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); - HybridBayesNet::shared_ptr result = hfg.eliminateSequential( - Ordering::ColamdConstrainedLast(hfg, {M(1), M(2)})); + HybridBayesNet::shared_ptr result = hfg.eliminateSequential(); // There are 4 variables (2 continuous + 2 discrete) in the bayes net. EXPECT_LONGS_EQUAL(4, result->size()); @@ -176,16 +176,17 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) { hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - hfg.add(GaussianMixtureFactor::FromFactors( + hfg.add(GaussianMixtureFactor( {X(1)}, {{M(1), 2}}, {boost::make_shared(X(1), I_3x3, Z_3x1), boost::make_shared(X(1), I_3x3, Vector3::Ones())})); hfg.add(DecisionTreeFactor(m1, {2, 8})); - hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); + // TODO(Varun) Adding extra discrete variable not connected to continuous + // variable throws segfault + // hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); - HybridBayesTree::shared_ptr result = - hfg.eliminateMultifrontal(hfg.getHybridOrdering()); + HybridBayesTree::shared_ptr result = hfg.eliminateMultifrontal(); // The bayes tree should have 3 cliques EXPECT_LONGS_EQUAL(3, result->size()); @@ -212,10 +213,10 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalCLG) { // Hybrid factor P(x1|c1) hfg.add(GaussianMixtureFactor({X(1)}, {m}, dt)); // Prior factor on c1 - hfg.add(HybridDiscreteFactor(DecisionTreeFactor(m, {2, 8}))); + hfg.add(DecisionTreeFactor(m, {2, 8})); // Get a constrained ordering keeping c1 last - auto ordering_full = hfg.getHybridOrdering(); + auto ordering_full = HybridOrdering(hfg); // Returns a Hybrid 
Bayes Tree with distribution P(x0|x1)P(x1|c1)P(c1) HybridBayesTree::shared_ptr hbt = hfg.eliminateMultifrontal(ordering_full); @@ -235,7 +236,7 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) { hfg.add(JacobianFactor(X(1), I_3x3, X(2), -I_3x3, Z_3x1)); { - hfg.add(GaussianMixtureFactor::FromFactors( + hfg.add(GaussianMixtureFactor( {X(0)}, {{M(0), 2}}, {boost::make_shared(X(0), I_3x3, Z_3x1), boost::make_shared(X(0), I_3x3, Vector3::Ones())})); @@ -247,8 +248,7 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) { hfg.add(GaussianMixtureFactor({X(2)}, {{M(1), 2}}, dt1)); } - hfg.add(HybridDiscreteFactor( - DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4"))); + hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); hfg.add(JacobianFactor(X(3), I_3x3, X(4), -I_3x3, Z_3x1)); hfg.add(JacobianFactor(X(4), I_3x3, X(5), -I_3x3, Z_3x1)); @@ -515,8 +515,7 @@ TEST(HybridGaussianFactorGraph, optimize) { hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt)); - auto result = - hfg.eliminateSequential(Ordering::ColamdConstrainedLast(hfg, {C(1)})); + auto result = hfg.eliminateSequential(); HybridValues hv = result->optimize(); @@ -562,6 +561,310 @@ TEST(HybridGaussianFactorGraph, Conditionals) { EXPECT(assert_equal(expected_discrete, result.discrete())); } +/* ****************************************************************************/ +// Test hybrid gaussian factor graph error and unnormalized probabilities +TEST(HybridGaussianFactorGraph, ErrorAndProbPrime) { + Switching s(3); + + HybridGaussianFactorGraph graph = s.linearizedFactorGraph; + + HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential(); + + const HybridValues delta = hybridBayesNet->optimize(); + const double error = graph.error(delta); + + // regression + EXPECT(assert_equal(1.58886, error, 1e-5)); + + // Real test: + EXPECT(assert_equal(graph.probPrime(delta), exp(-error), 1e-7)); +} + +/* 
****************************************************************************/ +// Test hybrid gaussian factor graph error and unnormalized probabilities +TEST(HybridGaussianFactorGraph, ErrorAndProbPrimeTree) { + Switching s(3); + + HybridGaussianFactorGraph graph = s.linearizedFactorGraph; + + HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential(); + + HybridValues delta = hybridBayesNet->optimize(); + auto error_tree = graph.error(delta.continuous()); + + std::vector discrete_keys = {{M(0), 2}, {M(1), 2}}; + std::vector leaves = {0.9998558, 0.4902432, 0.5193694, 0.0097568}; + AlgebraicDecisionTree expected_error(discrete_keys, leaves); + + // regression + EXPECT(assert_equal(expected_error, error_tree, 1e-7)); + + auto probs = graph.probPrime(delta.continuous()); + std::vector prob_leaves = {0.36793249, 0.61247742, 0.59489556, + 0.99029064}; + AlgebraicDecisionTree expected_probs(discrete_keys, prob_leaves); + + // regression + EXPECT(assert_equal(expected_probs, probs, 1e-7)); +} + +/* ****************************************************************************/ +// Check that assembleGraphTree assembles Gaussian factor graphs for each +// assignment. 
+TEST(HybridGaussianFactorGraph, assembleGraphTree) { + using symbol_shorthand::Z; + const int num_measurements = 1; + auto fg = tiny::createHybridGaussianFactorGraph( + num_measurements, VectorValues{{Z(0), Vector1(5.0)}}); + EXPECT_LONGS_EQUAL(3, fg.size()); + + // Assemble graph tree: + auto actual = fg.assembleGraphTree(); + + // Create expected decision tree with two factor graphs: + + // Get mixture factor: + auto mixture = boost::dynamic_pointer_cast(fg.at(0)); + CHECK(mixture); + + // Get prior factor: + const auto gf = boost::dynamic_pointer_cast(fg.at(1)); + CHECK(gf); + using GF = GaussianFactor::shared_ptr; + const GF prior = gf->asGaussian(); + CHECK(prior); + + // Create DiscreteValues for both 0 and 1: + DiscreteValues d0{{M(0), 0}}, d1{{M(0), 1}}; + + // Expected decision tree with two factor graphs: + // f(x0;mode=0)P(x0) and f(x0;mode=1)P(x0) + GaussianFactorGraphTree expected{ + M(0), GaussianFactorGraph(std::vector{(*mixture)(d0), prior}), + GaussianFactorGraph(std::vector{(*mixture)(d1), prior})}; + + EXPECT(assert_equal(expected(d0), actual(d0), 1e-5)); + EXPECT(assert_equal(expected(d1), actual(d1), 1e-5)); +} + +/* ****************************************************************************/ +// Check that the factor graph unnormalized probability is proportional to the +// Bayes net probability for the given measurements. 
+bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements, + const HybridGaussianFactorGraph &fg, size_t num_samples = 100) { + auto compute_ratio = [&](HybridValues *sample) -> double { + sample->update(measurements); // update sample with given measurements: + return bn.evaluate(*sample) / fg.probPrime(*sample); + }; + + HybridValues sample = bn.sample(&kRng); + double expected_ratio = compute_ratio(&sample); + + // Test ratios for a number of independent samples: + for (size_t i = 0; i < num_samples; i++) { + HybridValues sample = bn.sample(&kRng); + if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6) return false; + } + return true; +} + +/* ****************************************************************************/ +// Check that the factor graph unnormalized probability is proportional to the +// Bayes net probability for the given measurements. +bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements, + const HybridBayesNet &posterior, size_t num_samples = 100) { + auto compute_ratio = [&](HybridValues *sample) -> double { + sample->update(measurements); // update sample with given measurements: + return bn.evaluate(*sample) / posterior.evaluate(*sample); + }; + + HybridValues sample = bn.sample(&kRng); + double expected_ratio = compute_ratio(&sample); + + // Test ratios for a number of independent samples: + for (size_t i = 0; i < num_samples; i++) { + HybridValues sample = bn.sample(&kRng); + // GTSAM_PRINT(sample); + // std::cout << "ratio: " << compute_ratio(&sample) << std::endl; + if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6) return false; + } + return true; +} + +/* ****************************************************************************/ +// Check that eliminating tiny net with 1 measurement yields correct result. 
+TEST(HybridGaussianFactorGraph, EliminateTiny1) { + using symbol_shorthand::Z; + const int num_measurements = 1; + const VectorValues measurements{{Z(0), Vector1(5.0)}}; + auto bn = tiny::createHybridBayesNet(num_measurements); + auto fg = bn.toFactorGraph(measurements); + EXPECT_LONGS_EQUAL(3, fg.size()); + + EXPECT(ratioTest(bn, measurements, fg)); + + // Create expected Bayes Net: + HybridBayesNet expectedBayesNet; + + // Create Gaussian mixture on X(0). + using tiny::mode; + // regression, but mean checked to be 5.0 in both cases: + const auto conditional0 = boost::make_shared( + X(0), Vector1(14.1421), I_1x1 * 2.82843), + conditional1 = boost::make_shared( + X(0), Vector1(10.1379), I_1x1 * 2.02759); + expectedBayesNet.emplace_back( + new GaussianMixture({X(0)}, {}, {mode}, {conditional0, conditional1})); + + // Add prior on mode. + expectedBayesNet.emplace_back(new DiscreteConditional(mode, "74/26")); + + // Test elimination + const auto posterior = fg.eliminateSequential(); + EXPECT(assert_equal(expectedBayesNet, *posterior, 0.01)); + + EXPECT(ratioTest(bn, measurements, *posterior)); +} + +/* ****************************************************************************/ +// Check that eliminating tiny net with 2 measurements yields correct result. +TEST(HybridGaussianFactorGraph, EliminateTiny2) { + // Create factor graph with 2 measurements such that posterior mean = 5.0. + using symbol_shorthand::Z; + const int num_measurements = 2; + const VectorValues measurements{{Z(0), Vector1(4.0)}, {Z(1), Vector1(6.0)}}; + auto bn = tiny::createHybridBayesNet(num_measurements); + auto fg = bn.toFactorGraph(measurements); + EXPECT_LONGS_EQUAL(4, fg.size()); + + // Create expected Bayes Net: + HybridBayesNet expectedBayesNet; + + // Create Gaussian mixture on X(0). 
+ using tiny::mode; + // regression, but mean checked to be 5.0 in both cases: + const auto conditional0 = boost::make_shared( + X(0), Vector1(17.3205), I_1x1 * 3.4641), + conditional1 = boost::make_shared( + X(0), Vector1(10.274), I_1x1 * 2.0548); + expectedBayesNet.emplace_back( + new GaussianMixture({X(0)}, {}, {mode}, {conditional0, conditional1})); + + // Add prior on mode. + expectedBayesNet.emplace_back(new DiscreteConditional(mode, "23/77")); + + // Test elimination + const auto posterior = fg.eliminateSequential(); + EXPECT(assert_equal(expectedBayesNet, *posterior, 0.01)); + + EXPECT(ratioTest(bn, measurements, *posterior)); +} + +/* ****************************************************************************/ +// Test eliminating tiny net with 1 mode per measurement. +TEST(HybridGaussianFactorGraph, EliminateTiny22) { + // Create factor graph with 2 measurements such that posterior mean = 5.0. + using symbol_shorthand::Z; + const int num_measurements = 2; + const bool manyModes = true; + + // Create Bayes net and convert to factor graph. + auto bn = tiny::createHybridBayesNet(num_measurements, manyModes); + const VectorValues measurements{{Z(0), Vector1(4.0)}, {Z(1), Vector1(6.0)}}; + auto fg = bn.toFactorGraph(measurements); + EXPECT_LONGS_EQUAL(5, fg.size()); + + EXPECT(ratioTest(bn, measurements, fg)); + + // Test elimination + const auto posterior = fg.eliminateSequential(); + + EXPECT(ratioTest(bn, measurements, *posterior)); +} + +/* ****************************************************************************/ +// Test elimination of a switching network with one mode per measurement. +TEST(HybridGaussianFactorGraph, EliminateSwitchingNetwork) { + // Create a switching network with one mode per measurement. 
+ HybridBayesNet bn; + + // NOTE: we add reverse topological so we can sample from the Bayes net.: + + // Add measurements: + for (size_t t : {0, 1, 2}) { + // Create Gaussian mixture on Z(t) conditioned on X(t) and mode N(t): + const auto noise_mode_t = DiscreteKey{N(t), 2}; + bn.emplace_back( + new GaussianMixture({Z(t)}, {X(t)}, {noise_mode_t}, + {GaussianConditional::sharedMeanAndStddev( + Z(t), I_1x1, X(t), Z_1x1, 0.5), + GaussianConditional::sharedMeanAndStddev( + Z(t), I_1x1, X(t), Z_1x1, 3.0)})); + + // Create prior on discrete mode N(t): + bn.emplace_back(new DiscreteConditional(noise_mode_t, "20/80")); + } + + // Add motion models: + for (size_t t : {2, 1}) { + // Create Gaussian mixture on X(t) conditioned on X(t-1) and mode M(t-1): + const auto motion_model_t = DiscreteKey{M(t), 2}; + bn.emplace_back( + new GaussianMixture({X(t)}, {X(t - 1)}, {motion_model_t}, + {GaussianConditional::sharedMeanAndStddev( + X(t), I_1x1, X(t - 1), Z_1x1, 0.2), + GaussianConditional::sharedMeanAndStddev( + X(t), I_1x1, X(t - 1), I_1x1, 0.2)})); + + // Create prior on motion model M(t): + bn.emplace_back(new DiscreteConditional(motion_model_t, "40/60")); + } + + // Create Gaussian prior on continuous X(0) using sharedMeanAndStddev: + bn.push_back(GaussianConditional::sharedMeanAndStddev(X(0), Z_1x1, 0.1)); + + // Make sure we an sample from the Bayes net: + EXPECT_LONGS_EQUAL(6, bn.sample().continuous().size()); + + // Create measurements consistent with moving right every time: + const VectorValues measurements{ + {Z(0), Vector1(0.0)}, {Z(1), Vector1(1.0)}, {Z(2), Vector1(2.0)}}; + const HybridGaussianFactorGraph fg = bn.toFactorGraph(measurements); + + // Factor graph is: + // D D + // | | + // m1 m2 + // | | + // C-x0-HC-x1-HC-x2 + // | | | + // HF HF HF + // | | | + // n0 n1 n2 + // | | | + // D D D + EXPECT_LONGS_EQUAL(11, fg.size()); + EXPECT(ratioTest(bn, measurements, fg)); + + // Do elimination of X(2) only: + auto result = 
fg.eliminatePartialSequential(Ordering{X(2)}); + auto fg1 = *result.second; + fg1.push_back(*result.first); + EXPECT(ratioTest(bn, measurements, fg1)); + + // Create ordering that eliminates in time order, then discrete modes: + Ordering ordering {X(2), X(1), X(0), N(0), N(1), N(2), M(1), M(2)}; + + // Do elimination: + const HybridBayesNet::shared_ptr posterior = fg.eliminateSequential(ordering); + + // Test resulting posterior Bayes net has correct size: + EXPECT_LONGS_EQUAL(8, posterior->size()); + + // TODO(dellaert): this test fails - no idea why. + EXPECT(ratioTest(bn, measurements, *posterior)); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/hybrid/tests/testHybridGaussianISAM.cpp b/gtsam/hybrid/tests/testHybridGaussianISAM.cpp index 18ce7f10ec..4f135f84fb 100644 --- a/gtsam/hybrid/tests/testHybridGaussianISAM.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianISAM.cpp @@ -111,8 +111,7 @@ TEST(HybridGaussianElimination, IncrementalInference) { // Run update step isam.update(graph1); - auto discreteConditional_m0 = - isam[M(0)]->conditional()->asDiscreteConditional(); + auto discreteConditional_m0 = isam[M(0)]->conditional()->asDiscrete(); EXPECT(discreteConditional_m0->keys() == KeyVector({M(0)})); /********************************************************/ @@ -165,40 +164,45 @@ TEST(HybridGaussianElimination, IncrementalInference) { discrete_ordering += M(0); discrete_ordering += M(1); HybridBayesTree::shared_ptr discreteBayesTree = - expectedRemainingGraph->eliminateMultifrontal(discrete_ordering); + expectedRemainingGraph->BaseEliminateable::eliminateMultifrontal( + discrete_ordering); DiscreteValues m00; m00[M(0)] = 0, m00[M(1)] = 0; DiscreteConditional decisionTree = - *(*discreteBayesTree)[M(1)]->conditional()->asDiscreteConditional(); + *(*discreteBayesTree)[M(1)]->conditional()->asDiscrete(); double m00_prob = decisionTree(m00); - auto discreteConditional = 
isam[M(1)]->conditional()->asDiscreteConditional(); + auto discreteConditional = isam[M(1)]->conditional()->asDiscrete(); - // Test if the probability values are as expected with regression tests. + // Test the probability values with regression tests. DiscreteValues assignment; - EXPECT(assert_equal(m00_prob, 0.0619233, 1e-5)); + EXPECT(assert_equal(0.0952922, m00_prob, 1e-5)); assignment[M(0)] = 0; assignment[M(1)] = 0; - EXPECT(assert_equal(m00_prob, (*discreteConditional)(assignment), 1e-5)); + EXPECT(assert_equal(0.0952922, (*discreteConditional)(assignment), 1e-5)); assignment[M(0)] = 1; assignment[M(1)] = 0; - EXPECT(assert_equal(0.183743, (*discreteConditional)(assignment), 1e-5)); + EXPECT(assert_equal(0.282758, (*discreteConditional)(assignment), 1e-5)); assignment[M(0)] = 0; assignment[M(1)] = 1; - EXPECT(assert_equal(0.204159, (*discreteConditional)(assignment), 1e-5)); + EXPECT(assert_equal(0.314175, (*discreteConditional)(assignment), 1e-5)); assignment[M(0)] = 1; assignment[M(1)] = 1; - EXPECT(assert_equal(0.2, (*discreteConditional)(assignment), 1e-5)); + EXPECT(assert_equal(0.307775, (*discreteConditional)(assignment), 1e-5)); // Check if the clique conditional generated from incremental elimination // matches that of batch elimination. 
- auto expectedChordal = expectedRemainingGraph->eliminateMultifrontal(); - auto expectedConditional = dynamic_pointer_cast( - (*expectedChordal)[M(1)]->conditional()->inner()); + auto expectedChordal = + expectedRemainingGraph->BaseEliminateable::eliminateMultifrontal(); auto actualConditional = dynamic_pointer_cast( isam[M(1)]->conditional()->inner()); - EXPECT(assert_equal(*actualConditional, *expectedConditional, 1e-6)); + // Account for the probability terms from evaluating continuous FGs + DiscreteKeys discrete_keys = {{M(0), 2}, {M(1), 2}}; + vector probs = {0.095292197, 0.31417524, 0.28275772, 0.30777485}; + auto expectedConditional = + boost::make_shared(discrete_keys, probs); + EXPECT(assert_equal(*expectedConditional, *actualConditional, 1e-6)); } /* ****************************************************************************/ @@ -376,7 +380,7 @@ TEST(HybridGaussianISAM, NonTrivial) { Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin auto priorNoise = noiseModel::Diagonal::Sigmas( Vector3(0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta - fg.emplace_nonlinear>(X(0), prior, priorNoise); + fg.emplace_shared>(X(0), prior, priorNoise); // create a noise model for the landmark measurements auto poseNoise = noiseModel::Isotropic::Sigma(3, 0.1); @@ -385,11 +389,11 @@ TEST(HybridGaussianISAM, NonTrivial) { // where X is the base link and W is the foot link. 
// Add connecting poses similar to PoseFactors in GTD - fg.emplace_nonlinear>(X(0), Y(0), Pose2(0, 1.0, 0), + fg.emplace_shared>(X(0), Y(0), Pose2(0, 1.0, 0), poseNoise); - fg.emplace_nonlinear>(Y(0), Z(0), Pose2(0, 1.0, 0), + fg.emplace_shared>(Y(0), Z(0), Pose2(0, 1.0, 0), poseNoise); - fg.emplace_nonlinear>(Z(0), W(0), Pose2(0, 1.0, 0), + fg.emplace_shared>(Z(0), W(0), Pose2(0, 1.0, 0), poseNoise); // Create initial estimate @@ -428,14 +432,14 @@ TEST(HybridGaussianISAM, NonTrivial) { fg.push_back(mixtureFactor); // Add equivalent of ImuFactor - fg.emplace_nonlinear>(X(0), X(1), Pose2(1.0, 0.0, 0), + fg.emplace_shared>(X(0), X(1), Pose2(1.0, 0.0, 0), poseNoise); // PoseFactors-like at k=1 - fg.emplace_nonlinear>(X(1), Y(1), Pose2(0, 1, 0), + fg.emplace_shared>(X(1), Y(1), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Y(1), Z(1), Pose2(0, 1, 0), + fg.emplace_shared>(Y(1), Z(1), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Z(1), W(1), Pose2(-1, 1, 0), + fg.emplace_shared>(Z(1), W(1), Pose2(-1, 1, 0), poseNoise); initial.insert(X(1), Pose2(1.0, 0.0, 0.0)); @@ -468,14 +472,14 @@ TEST(HybridGaussianISAM, NonTrivial) { fg.push_back(mixtureFactor); // Add equivalent of ImuFactor - fg.emplace_nonlinear>(X(1), X(2), Pose2(1.0, 0.0, 0), + fg.emplace_shared>(X(1), X(2), Pose2(1.0, 0.0, 0), poseNoise); // PoseFactors-like at k=1 - fg.emplace_nonlinear>(X(2), Y(2), Pose2(0, 1, 0), + fg.emplace_shared>(X(2), Y(2), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Y(2), Z(2), Pose2(0, 1, 0), + fg.emplace_shared>(Y(2), Z(2), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Z(2), W(2), Pose2(-2, 1, 0), + fg.emplace_shared>(Z(2), W(2), Pose2(-2, 1, 0), poseNoise); initial.insert(X(2), Pose2(2.0, 0.0, 0.0)); @@ -511,14 +515,14 @@ TEST(HybridGaussianISAM, NonTrivial) { fg.push_back(mixtureFactor); // Add equivalent of ImuFactor - fg.emplace_nonlinear>(X(2), X(3), Pose2(1.0, 0.0, 0), + fg.emplace_shared>(X(2), X(3), Pose2(1.0, 0.0, 0), poseNoise); // 
PoseFactors-like at k=3 - fg.emplace_nonlinear>(X(3), Y(3), Pose2(0, 1, 0), + fg.emplace_shared>(X(3), Y(3), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Y(3), Z(3), Pose2(0, 1, 0), + fg.emplace_shared>(Y(3), Z(3), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Z(3), W(3), Pose2(-3, 1, 0), + fg.emplace_shared>(Z(3), W(3), Pose2(-3, 1, 0), poseNoise); initial.insert(X(3), Pose2(3.0, 0.0, 0.0)); @@ -535,7 +539,7 @@ TEST(HybridGaussianISAM, NonTrivial) { // The final discrete graph should not be empty since we have eliminated // all continuous variables. - auto discreteTree = inc[M(3)]->conditional()->asDiscreteConditional(); + auto discreteTree = inc[M(3)]->conditional()->asDiscrete(); EXPECT_LONGS_EQUAL(3, discreteTree->size()); // Test if the optimal discrete mode assignment is (1, 1, 1). diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index f6889f132c..5b8b32b213 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -54,18 +54,20 @@ TEST(HybridFactorGraph, GaussianFactorGraph) { HybridNonlinearFactorGraph fg; // Add a simple prior factor to the nonlinear factor graph - fg.emplace_nonlinear>(X(0), 0, Isotropic::Sigma(1, 0.1)); + fg.emplace_shared>(X(0), 0, Isotropic::Sigma(1, 0.1)); // Linearization point Values linearizationPoint; linearizationPoint.insert(X(0), 0); + // Linearize the factor graph. HybridGaussianFactorGraph ghfg = *fg.linearize(linearizationPoint); + EXPECT_LONGS_EQUAL(1, ghfg.size()); - // Add a factor to the GaussianFactorGraph - ghfg.add(JacobianFactor(X(0), I_1x1, Vector1(5))); - - EXPECT_LONGS_EQUAL(2, ghfg.size()); + // Check that the error is the same for the nonlinear values. 
+ const VectorValues zero{{X(0), Vector1(0)}}; + const HybridValues hybridValues{zero, {}, linearizationPoint}; + EXPECT_DOUBLES_EQUAL(fg.error(hybridValues), ghfg.error(hybridValues), 1e-9); } /*************************************************************************** @@ -311,8 +313,7 @@ TEST(HybridsGaussianElimination, Eliminate_x1) { Ordering ordering; ordering += X(1); - std::pair result = - EliminateHybrid(factors, ordering); + auto result = EliminateHybrid(factors, ordering); CHECK(result.first); EXPECT_LONGS_EQUAL(1, result.first->nrFrontals()); CHECK(result.second); @@ -350,7 +351,7 @@ TEST(HybridGaussianElimination, EliminateHybrid_2_Variable) { ordering += X(1); HybridConditional::shared_ptr hybridConditionalMixture; - HybridFactor::shared_ptr factorOnModes; + boost::shared_ptr factorOnModes; std::tie(hybridConditionalMixture, factorOnModes) = EliminateHybrid(factors, ordering); @@ -364,17 +365,11 @@ TEST(HybridGaussianElimination, EliminateHybrid_2_Variable) { // 1 parent, which is the mode EXPECT_LONGS_EQUAL(1, gaussianConditionalMixture->nrParents()); - // This is now a HybridDiscreteFactor - auto hybridDiscreteFactor = - dynamic_pointer_cast(factorOnModes); - // Access the type-erased inner object and convert to DecisionTreeFactor - auto discreteFactor = - dynamic_pointer_cast(hybridDiscreteFactor->inner()); + // This is now a discreteFactor + auto discreteFactor = dynamic_pointer_cast(factorOnModes); CHECK(discreteFactor); EXPECT_LONGS_EQUAL(1, discreteFactor->discreteKeys().size()); EXPECT(discreteFactor->root_->isLeaf() == false); - - // TODO(Varun) Test emplace_discrete } /**************************************************************************** @@ -385,11 +380,11 @@ TEST(HybridFactorGraph, Partial_Elimination) { auto linearizedFactorGraph = self.linearizedFactorGraph; - // Create ordering. + // Create ordering of only continuous variables. Ordering ordering; for (size_t k = 0; k < self.K; k++) ordering += X(k); - // Eliminate partially. 
+ // Eliminate partially i.e. only continuous part. HybridBayesNet::shared_ptr hybridBayesNet; HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; std::tie(hybridBayesNet, remainingFactorGraph) = @@ -435,14 +430,16 @@ TEST(HybridFactorGraph, Full_Elimination) { DiscreteFactorGraph discrete_fg; // TODO(Varun) Make this a function of HybridGaussianFactorGraph? - for (HybridFactor::shared_ptr& factor : (*remainingFactorGraph_partial)) { - auto df = dynamic_pointer_cast(factor); - discrete_fg.push_back(df->inner()); + for (auto& factor : (*remainingFactorGraph_partial)) { + auto df = dynamic_pointer_cast(factor); + assert(df); + discrete_fg.push_back(df); } + ordering.clear(); for (size_t k = 0; k < self.K - 1; k++) ordering += M(k); discreteBayesNet = - *discrete_fg.eliminateSequential(ordering, EliminateForMPE); + *discrete_fg.eliminateSequential(ordering, EliminateDiscrete); } // Create ordering. @@ -499,14 +496,14 @@ TEST(HybridFactorGraph, Printing) { string expected_hybridFactorGraph = R"( size: 7 -factor 0: Continuous [x0] - +factor 0: A[x0] = [ 10 ] b = [ -10 ] No noise model -factor 1: Hybrid [x0 x1; m0]{ +factor 1: +Hybrid [x0 x1; m0]{ Choice(m0) 0 Leaf : A[x0] = [ @@ -529,7 +526,8 @@ factor 1: Hybrid [x0 x1; m0]{ No noise model } -factor 2: Hybrid [x1 x2; m1]{ +factor 2: +Hybrid [x1 x2; m1]{ Choice(m1) 0 Leaf : A[x1] = [ @@ -552,26 +550,22 @@ factor 2: Hybrid [x1 x2; m1]{ No noise model } -factor 3: Continuous [x1] - +factor 3: A[x1] = [ 10 ] b = [ -10 ] No noise model -factor 4: Continuous [x2] - +factor 4: A[x2] = [ 10 ] b = [ -10 ] No noise model -factor 5: Discrete [m0] - P( m0 ): +factor 5: P( m0 ): Leaf 0.5 -factor 6: Discrete [m1 m0] - P( m1 | m0 ): +factor 6: P( m1 | m0 ): Choice(m1) 0 Choice(m0) 0 0 Leaf 0.33333333 @@ -586,7 +580,7 @@ factor 6: Discrete [m1 m0] // Expected output for hybridBayesNet. 
string expected_hybridBayesNet = R"( size: 3 -factor 0: Hybrid P( x0 | x1 m0) +conditional 0: Hybrid P( x0 | x1 m0) Discrete Keys = (m0, 2), Choice(m0) 0 Leaf p(x0 | x1) @@ -601,7 +595,7 @@ factor 0: Hybrid P( x0 | x1 m0) d = [ -9.95037 ] No noise model -factor 1: Hybrid P( x1 | x2 m0 m1) +conditional 1: Hybrid P( x1 | x2 m0 m1) Discrete Keys = (m0, 2), (m1, 2), Choice(m1) 0 Choice(m0) @@ -630,29 +624,37 @@ factor 1: Hybrid P( x1 | x2 m0 m1) d = [ -10 ] No noise model -factor 2: Hybrid P( x2 | m0 m1) +conditional 2: Hybrid P( x2 | m0 m1) Discrete Keys = (m0, 2), (m1, 2), Choice(m1) 0 Choice(m0) 0 0 Leaf p(x2) R = [ 10.0494 ] d = [ -10.1489 ] + mean: 1 elements + x2: -1.0099 No noise model 0 1 Leaf p(x2) R = [ 10.0494 ] d = [ -10.1479 ] + mean: 1 elements + x2: -1.0098 No noise model 1 Choice(m0) 1 0 Leaf p(x2) R = [ 10.0494 ] d = [ -10.0504 ] + mean: 1 elements + x2: -1.0001 No noise model 1 1 Leaf p(x2) R = [ 10.0494 ] d = [ -10.0494 ] + mean: 1 elements + x2: -1 No noise model )"; @@ -674,7 +676,7 @@ TEST(HybridFactorGraph, DefaultDecisionTree) { Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin auto priorNoise = noiseModel::Diagonal::Sigmas( Vector3(0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta - fg.emplace_nonlinear>(X(0), prior, priorNoise); + fg.emplace_shared>(X(0), prior, priorNoise); using PlanarMotionModel = BetweenFactor; @@ -687,7 +689,7 @@ TEST(HybridFactorGraph, DefaultDecisionTree) { moving = boost::make_shared(X(0), X(1), odometry, noise_model); std::vector motion_models = {still, moving}; - fg.emplace_hybrid( + fg.emplace_shared( contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, motion_models); // Add Range-Bearing measurements to from X0 to L0 and X1 to L1. 
@@ -699,9 +701,9 @@ TEST(HybridFactorGraph, DefaultDecisionTree) { double range11 = std::sqrt(4.0 + 4.0), range22 = 2.0; // Add Bearing-Range factors - fg.emplace_nonlinear>( + fg.emplace_shared>( X(0), L(0), bearing11, range11, measurementNoise); - fg.emplace_nonlinear>( + fg.emplace_shared>( X(1), L(1), bearing22, range22, measurementNoise); // Create (deliberately inaccurate) initial estimate diff --git a/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp b/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp index 3bdb5ed1e0..d899091222 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp @@ -124,8 +124,7 @@ TEST(HybridNonlinearISAM, IncrementalInference) { isam.update(graph1, initial); HybridGaussianISAM bayesTree = isam.bayesTree(); - auto discreteConditional_m0 = - bayesTree[M(0)]->conditional()->asDiscreteConditional(); + auto discreteConditional_m0 = bayesTree[M(0)]->conditional()->asDiscrete(); EXPECT(discreteConditional_m0->keys() == KeyVector({M(0)})); /********************************************************/ @@ -153,7 +152,8 @@ TEST(HybridNonlinearISAM, IncrementalInference) { HybridBayesTree::shared_ptr expectedHybridBayesTree; HybridGaussianFactorGraph::shared_ptr expectedRemainingGraph; std::tie(expectedHybridBayesTree, expectedRemainingGraph) = - switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering); + switching.linearizedFactorGraph + .BaseEliminateable::eliminatePartialMultifrontal(ordering); // The densities on X(1) should be the same auto x0_conditional = dynamic_pointer_cast( @@ -182,41 +182,44 @@ TEST(HybridNonlinearISAM, IncrementalInference) { discrete_ordering += M(0); discrete_ordering += M(1); HybridBayesTree::shared_ptr discreteBayesTree = - expectedRemainingGraph->eliminateMultifrontal(discrete_ordering); + expectedRemainingGraph->BaseEliminateable::eliminateMultifrontal( + discrete_ordering); DiscreteValues m00; m00[M(0)] = 0, m00[M(1)] = 0; DiscreteConditional 
decisionTree = - *(*discreteBayesTree)[M(1)]->conditional()->asDiscreteConditional(); + *(*discreteBayesTree)[M(1)]->conditional()->asDiscrete(); double m00_prob = decisionTree(m00); - auto discreteConditional = - bayesTree[M(1)]->conditional()->asDiscreteConditional(); + auto discreteConditional = bayesTree[M(1)]->conditional()->asDiscrete(); - // Test if the probability values are as expected with regression tests. + // Test the probability values with regression tests. DiscreteValues assignment; - EXPECT(assert_equal(m00_prob, 0.0619233, 1e-5)); + EXPECT(assert_equal(0.0952922, m00_prob, 1e-5)); assignment[M(0)] = 0; assignment[M(1)] = 0; - EXPECT(assert_equal(m00_prob, (*discreteConditional)(assignment), 1e-5)); + EXPECT(assert_equal(0.0952922, (*discreteConditional)(assignment), 1e-5)); assignment[M(0)] = 1; assignment[M(1)] = 0; - EXPECT(assert_equal(0.183743, (*discreteConditional)(assignment), 1e-5)); + EXPECT(assert_equal(0.282758, (*discreteConditional)(assignment), 1e-5)); assignment[M(0)] = 0; assignment[M(1)] = 1; - EXPECT(assert_equal(0.204159, (*discreteConditional)(assignment), 1e-5)); + EXPECT(assert_equal(0.314175, (*discreteConditional)(assignment), 1e-5)); assignment[M(0)] = 1; assignment[M(1)] = 1; - EXPECT(assert_equal(0.2, (*discreteConditional)(assignment), 1e-5)); + EXPECT(assert_equal(0.307775, (*discreteConditional)(assignment), 1e-5)); // Check if the clique conditional generated from incremental elimination // matches that of batch elimination. 
auto expectedChordal = expectedRemainingGraph->eliminateMultifrontal(); - auto expectedConditional = dynamic_pointer_cast( - (*expectedChordal)[M(1)]->conditional()->inner()); auto actualConditional = dynamic_pointer_cast( bayesTree[M(1)]->conditional()->inner()); - EXPECT(assert_equal(*actualConditional, *expectedConditional, 1e-6)); + // Account for the probability terms from evaluating continuous FGs + DiscreteKeys discrete_keys = {{M(0), 2}, {M(1), 2}}; + vector probs = {0.095292197, 0.31417524, 0.28275772, 0.30777485}; + auto expectedConditional = + boost::make_shared(discrete_keys, probs); + EXPECT(assert_equal(*expectedConditional, *actualConditional, 1e-6)); } /* ****************************************************************************/ @@ -250,7 +253,8 @@ TEST(HybridNonlinearISAM, Approx_inference) { HybridBayesTree::shared_ptr unprunedHybridBayesTree; HybridGaussianFactorGraph::shared_ptr unprunedRemainingGraph; std::tie(unprunedHybridBayesTree, unprunedRemainingGraph) = - switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering); + switching.linearizedFactorGraph + .BaseEliminateable::eliminatePartialMultifrontal(ordering); size_t maxNrLeaves = 5; incrementalHybrid.update(graph1, initial); @@ -353,10 +357,9 @@ TEST(HybridNonlinearISAM, Incremental_approximate) { // Run update with pruning size_t maxComponents = 5; incrementalHybrid.update(graph1, initial); + incrementalHybrid.prune(maxComponents); HybridGaussianISAM bayesTree = incrementalHybrid.bayesTree(); - bayesTree.prune(maxComponents); - // Check if we have a bayes tree with 4 hybrid nodes, // each with 2, 4, 8, and 5 (pruned) leaves respetively. EXPECT_LONGS_EQUAL(4, bayesTree.size()); @@ -378,10 +381,9 @@ TEST(HybridNonlinearISAM, Incremental_approximate) { // Run update with pruning a second time. 
incrementalHybrid.update(graph2, initial); + incrementalHybrid.prune(maxComponents); bayesTree = incrementalHybrid.bayesTree(); - bayesTree.prune(maxComponents); - // Check if we have a bayes tree with pruned hybrid nodes, // with 5 (pruned) leaves. CHECK_EQUAL(5, bayesTree.size()); @@ -407,7 +409,7 @@ TEST(HybridNonlinearISAM, NonTrivial) { Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin auto priorNoise = noiseModel::Diagonal::Sigmas( Vector3(0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta - fg.emplace_nonlinear>(X(0), prior, priorNoise); + fg.emplace_shared>(X(0), prior, priorNoise); // create a noise model for the landmark measurements auto poseNoise = noiseModel::Isotropic::Sigma(3, 0.1); @@ -416,11 +418,11 @@ TEST(HybridNonlinearISAM, NonTrivial) { // where X is the base link and W is the foot link. // Add connecting poses similar to PoseFactors in GTD - fg.emplace_nonlinear>(X(0), Y(0), Pose2(0, 1.0, 0), + fg.emplace_shared>(X(0), Y(0), Pose2(0, 1.0, 0), poseNoise); - fg.emplace_nonlinear>(Y(0), Z(0), Pose2(0, 1.0, 0), + fg.emplace_shared>(Y(0), Z(0), Pose2(0, 1.0, 0), poseNoise); - fg.emplace_nonlinear>(Z(0), W(0), Pose2(0, 1.0, 0), + fg.emplace_shared>(Z(0), W(0), Pose2(0, 1.0, 0), poseNoise); // Create initial estimate @@ -449,14 +451,14 @@ TEST(HybridNonlinearISAM, NonTrivial) { fg.push_back(mixtureFactor); // Add equivalent of ImuFactor - fg.emplace_nonlinear>(X(0), X(1), Pose2(1.0, 0.0, 0), + fg.emplace_shared>(X(0), X(1), Pose2(1.0, 0.0, 0), poseNoise); // PoseFactors-like at k=1 - fg.emplace_nonlinear>(X(1), Y(1), Pose2(0, 1, 0), + fg.emplace_shared>(X(1), Y(1), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Y(1), Z(1), Pose2(0, 1, 0), + fg.emplace_shared>(Y(1), Z(1), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Z(1), W(1), Pose2(-1, 1, 0), + fg.emplace_shared>(Z(1), W(1), Pose2(-1, 1, 0), poseNoise); initial.insert(X(1), Pose2(1.0, 0.0, 0.0)); @@ -489,14 +491,14 @@ TEST(HybridNonlinearISAM, NonTrivial) { 
fg.push_back(mixtureFactor); // Add equivalent of ImuFactor - fg.emplace_nonlinear>(X(1), X(2), Pose2(1.0, 0.0, 0), + fg.emplace_shared>(X(1), X(2), Pose2(1.0, 0.0, 0), poseNoise); // PoseFactors-like at k=1 - fg.emplace_nonlinear>(X(2), Y(2), Pose2(0, 1, 0), + fg.emplace_shared>(X(2), Y(2), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Y(2), Z(2), Pose2(0, 1, 0), + fg.emplace_shared>(Y(2), Z(2), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Z(2), W(2), Pose2(-2, 1, 0), + fg.emplace_shared>(Z(2), W(2), Pose2(-2, 1, 0), poseNoise); initial.insert(X(2), Pose2(2.0, 0.0, 0.0)); @@ -532,14 +534,14 @@ TEST(HybridNonlinearISAM, NonTrivial) { fg.push_back(mixtureFactor); // Add equivalent of ImuFactor - fg.emplace_nonlinear>(X(2), X(3), Pose2(1.0, 0.0, 0), + fg.emplace_shared>(X(2), X(3), Pose2(1.0, 0.0, 0), poseNoise); // PoseFactors-like at k=3 - fg.emplace_nonlinear>(X(3), Y(3), Pose2(0, 1, 0), + fg.emplace_shared>(X(3), Y(3), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Y(3), Z(3), Pose2(0, 1, 0), + fg.emplace_shared>(Y(3), Z(3), Pose2(0, 1, 0), poseNoise); - fg.emplace_nonlinear>(Z(3), W(3), Pose2(-3, 1, 0), + fg.emplace_shared>(Z(3), W(3), Pose2(-3, 1, 0), poseNoise); initial.insert(X(3), Pose2(3.0, 0.0, 0.0)); @@ -558,7 +560,7 @@ TEST(HybridNonlinearISAM, NonTrivial) { // The final discrete graph should not be empty since we have eliminated // all continuous variables. - auto discreteTree = bayesTree[M(3)]->conditional()->asDiscreteConditional(); + auto discreteTree = bayesTree[M(3)]->conditional()->asDiscrete(); EXPECT_LONGS_EQUAL(3, discreteTree->size()); // Test if the optimal discrete mode assignment is (1, 1, 1). 
diff --git a/gtsam/hybrid/tests/testHybridValues.cpp b/gtsam/hybrid/tests/testHybridValues.cpp index 6f510601d4..02e1cb733d 100644 --- a/gtsam/hybrid/tests/testHybridValues.cpp +++ b/gtsam/hybrid/tests/testHybridValues.cpp @@ -32,22 +32,45 @@ using namespace std; using namespace gtsam; -TEST(HybridValues, basics) { +static const HybridValues kExample{{{99, Vector2(2, 3)}}, {{100, 3}}}; + +/* ************************************************************************* */ +TEST(HybridValues, Basics) { HybridValues values; values.insert(99, Vector2(2, 3)); values.insert(100, 3); + EXPECT(assert_equal(kExample, values)); EXPECT(assert_equal(values.at(99), Vector2(2, 3))); EXPECT(assert_equal(values.atDiscrete(100), int(3))); - values.print(); - HybridValues values2; values2.insert(100, 3); values2.insert(99, Vector2(2, 3)); - EXPECT(assert_equal(values2, values)); + EXPECT(assert_equal(kExample, values2)); +} - values2.insert(98, Vector2(2, 3)); - EXPECT(!assert_equal(values2, values)); +/* ************************************************************************* */ +// Check insert +TEST(HybridValues, Insert) { + HybridValues actual; + EXPECT(assert_equal({{}, {{100, 3}}}, // + actual.insert(DiscreteValues{{100, 3}}))); + EXPECT(assert_equal(kExample, // + actual.insert(VectorValues{{99, Vector2(2, 3)}}))); + HybridValues actual2; + EXPECT(assert_equal(kExample, actual2.insert(kExample))); +} + +/* ************************************************************************* */ +// Check update. 
+TEST(HybridValues, Update) { + HybridValues actual(kExample); + EXPECT(assert_equal({{{99, Vector2(2, 3)}}, {{100, 2}}}, + actual.update(DiscreteValues{{100, 2}}))); + EXPECT(assert_equal({{{99, Vector1(4)}}, {{100, 2}}}, + actual.update(VectorValues{{99, Vector1(4)}}))); + HybridValues actual2(kExample); + EXPECT(assert_equal(kExample, actual2.update(kExample))); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testMixtureFactor.cpp b/gtsam/hybrid/tests/testMixtureFactor.cpp new file mode 100644 index 0000000000..9e4d66bf27 --- /dev/null +++ b/gtsam/hybrid/tests/testMixtureFactor.cpp @@ -0,0 +1,122 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testMixtureFactor.cpp + * @brief Unit tests for MixtureFactor + * @author Varun Agrawal + * @date October 2022 + */ + +#include +#include +#include +#include +#include + +// Include for test suite +#include + +using namespace std; +using namespace gtsam; +using noiseModel::Isotropic; +using symbol_shorthand::M; +using symbol_shorthand::X; + +/* ************************************************************************* */ +// Check iterators of empty mixture. +TEST(MixtureFactor, Constructor) { + MixtureFactor factor; + MixtureFactor::const_iterator const_it = factor.begin(); + CHECK(const_it == factor.end()); + MixtureFactor::iterator it = factor.begin(); + CHECK(it == factor.end()); +} + +/* ************************************************************************* */ +// Test .print() output. 
+TEST(MixtureFactor, Printing) { + DiscreteKey m1(1, 2); + double between0 = 0.0; + double between1 = 1.0; + + Vector1 sigmas = Vector1(1.0); + auto model = noiseModel::Diagonal::Sigmas(sigmas, false); + + auto f0 = + boost::make_shared>(X(1), X(2), between0, model); + auto f1 = + boost::make_shared>(X(1), X(2), between1, model); + std::vector factors{f0, f1}; + + MixtureFactor mixtureFactor({X(1), X(2)}, {m1}, factors); + + std::string expected = + R"(Hybrid [x1 x2; 1] +MixtureFactor + Choice(1) + 0 Leaf Nonlinear factor on 2 keys + 1 Leaf Nonlinear factor on 2 keys +)"; + EXPECT(assert_print_equal(expected, mixtureFactor)); +} + +/* ************************************************************************* */ +static MixtureFactor getMixtureFactor() { + DiscreteKey m1(1, 2); + + double between0 = 0.0; + double between1 = 1.0; + + Vector1 sigmas = Vector1(1.0); + auto model = noiseModel::Diagonal::Sigmas(sigmas, false); + + auto f0 = + boost::make_shared>(X(1), X(2), between0, model); + auto f1 = + boost::make_shared>(X(1), X(2), between1, model); + std::vector factors{f0, f1}; + + return MixtureFactor({X(1), X(2)}, {m1}, factors); +} + +/* ************************************************************************* */ +// Test the error of the MixtureFactor +TEST(MixtureFactor, Error) { + auto mixtureFactor = getMixtureFactor(); + + Values continuousValues; + continuousValues.insert(X(1), 0); + continuousValues.insert(X(2), 1); + + AlgebraicDecisionTree error_tree = mixtureFactor.error(continuousValues); + + DiscreteKey m1(1, 2); + std::vector discrete_keys = {m1}; + std::vector errors = {0.5, 0}; + AlgebraicDecisionTree expected_error(discrete_keys, errors); + + EXPECT(assert_equal(expected_error, error_tree)); +} + +/* ************************************************************************* */ +// Test dim of the MixtureFactor +TEST(MixtureFactor, Dim) { + auto mixtureFactor = getMixtureFactor(); + EXPECT_LONGS_EQUAL(1, mixtureFactor.dim()); +} + +/* 
************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testSerializationHybrid.cpp b/gtsam/hybrid/tests/testSerializationHybrid.cpp new file mode 100644 index 0000000000..bfa8b75140 --- /dev/null +++ b/gtsam/hybrid/tests/testSerializationHybrid.cpp @@ -0,0 +1,152 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testSerializationHybrid.cpp + * @brief Unit tests for hybrid serialization + * @author Varun Agrawal + * @date January 2023 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "Switching.h" + +// Include for test suite +#include + +using namespace std; +using namespace gtsam; +using symbol_shorthand::M; +using symbol_shorthand::X; +using symbol_shorthand::Z; + +using namespace serializationTestHelpers; + +BOOST_CLASS_EXPORT_GUID(Factor, "gtsam_Factor"); +BOOST_CLASS_EXPORT_GUID(HybridFactor, "gtsam_HybridFactor"); +BOOST_CLASS_EXPORT_GUID(JacobianFactor, "gtsam_JacobianFactor"); +BOOST_CLASS_EXPORT_GUID(GaussianConditional, "gtsam_GaussianConditional"); +BOOST_CLASS_EXPORT_GUID(DiscreteConditional, "gtsam_DiscreteConditional"); + +BOOST_CLASS_EXPORT_GUID(DecisionTreeFactor, "gtsam_DecisionTreeFactor"); +using ADT = AlgebraicDecisionTree; +BOOST_CLASS_EXPORT_GUID(ADT, "gtsam_AlgebraicDecisionTree"); +BOOST_CLASS_EXPORT_GUID(ADT::Leaf, "gtsam_AlgebraicDecisionTree_Leaf"); +BOOST_CLASS_EXPORT_GUID(ADT::Choice, 
"gtsam_AlgebraicDecisionTree_Choice") + +BOOST_CLASS_EXPORT_GUID(GaussianMixtureFactor, "gtsam_GaussianMixtureFactor"); +BOOST_CLASS_EXPORT_GUID(GaussianMixtureFactor::Factors, + "gtsam_GaussianMixtureFactor_Factors"); +BOOST_CLASS_EXPORT_GUID(GaussianMixtureFactor::Factors::Leaf, + "gtsam_GaussianMixtureFactor_Factors_Leaf"); +BOOST_CLASS_EXPORT_GUID(GaussianMixtureFactor::Factors::Choice, + "gtsam_GaussianMixtureFactor_Factors_Choice"); + +BOOST_CLASS_EXPORT_GUID(GaussianMixture, "gtsam_GaussianMixture"); +BOOST_CLASS_EXPORT_GUID(GaussianMixture::Conditionals, + "gtsam_GaussianMixture_Conditionals"); +BOOST_CLASS_EXPORT_GUID(GaussianMixture::Conditionals::Leaf, + "gtsam_GaussianMixture_Conditionals_Leaf"); +BOOST_CLASS_EXPORT_GUID(GaussianMixture::Conditionals::Choice, + "gtsam_GaussianMixture_Conditionals_Choice"); +// Needed since GaussianConditional::FromMeanAndStddev uses it +BOOST_CLASS_EXPORT_GUID(noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); + +BOOST_CLASS_EXPORT_GUID(HybridBayesNet, "gtsam_HybridBayesNet"); + +/* ****************************************************************************/ +// Test GaussianMixtureFactor serialization. +TEST(HybridSerialization, GaussianMixtureFactor) { + KeyVector continuousKeys{X(0)}; + DiscreteKeys discreteKeys{{M(0), 2}}; + + auto A = Matrix::Zero(2, 1); + auto b0 = Matrix::Zero(2, 1); + auto b1 = Matrix::Ones(2, 1); + auto f0 = boost::make_shared(X(0), A, b0); + auto f1 = boost::make_shared(X(0), A, b1); + std::vector factors{f0, f1}; + + const GaussianMixtureFactor factor(continuousKeys, discreteKeys, factors); + + EXPECT(equalsObj(factor)); + EXPECT(equalsXML(factor)); + EXPECT(equalsBinary(factor)); +} + +/* ****************************************************************************/ +// Test HybridConditional serialization. 
+TEST(HybridSerialization, HybridConditional) { + const DiscreteKey mode(M(0), 2); + Matrix1 I = Matrix1::Identity(); + const auto conditional = boost::make_shared( + GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); + const HybridConditional hc(conditional); + + EXPECT(equalsObj(hc)); + EXPECT(equalsXML(hc)); + EXPECT(equalsBinary(hc)); +} + +/* ****************************************************************************/ +// Test GaussianMixture serialization. +TEST(HybridSerialization, GaussianMixture) { + const DiscreteKey mode(M(0), 2); + Matrix1 I = Matrix1::Identity(); + const auto conditional0 = boost::make_shared( + GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); + const auto conditional1 = boost::make_shared( + GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 3)); + const GaussianMixture gm({Z(0)}, {X(0)}, {mode}, + {conditional0, conditional1}); + + EXPECT(equalsObj(gm)); + EXPECT(equalsXML(gm)); + EXPECT(equalsBinary(gm)); +} + +/* ****************************************************************************/ +// Test HybridBayesNet serialization. +TEST(HybridSerialization, HybridBayesNet) { + Switching s(2); + HybridBayesNet hbn = *(s.linearizedFactorGraph.eliminateSequential()); + + EXPECT(equalsObj(hbn)); + EXPECT(equalsXML(hbn)); + EXPECT(equalsBinary(hbn)); +} + +/* ****************************************************************************/ +// Test HybridBayesTree serialization. 
+TEST(HybridSerialization, HybridBayesTree) { + Switching s(2); + HybridBayesTree hbt = *(s.linearizedFactorGraph.eliminateMultifrontal()); + + EXPECT(equalsObj(hbt)); + EXPECT(equalsXML(hbt)); + EXPECT(equalsBinary(hbt)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/inference/BayesNet-inst.h b/gtsam/inference/BayesNet-inst.h index e792b5c032..f06008c880 100644 --- a/gtsam/inference/BayesNet-inst.h +++ b/gtsam/inference/BayesNet-inst.h @@ -31,7 +31,14 @@ namespace gtsam { template void BayesNet::print(const std::string& s, const KeyFormatter& formatter) const { - Base::print(s, formatter); + std::cout << (s.empty() ? "" : s + " ") << std::endl; + std::cout << "size: " << this->size() << std::endl; + for (size_t i = 0; i < this->size(); i++) { + const auto& conditional = this->at(i); + std::stringstream ss; + ss << "conditional " << i << ": "; + if (conditional) conditional->print(ss.str(), formatter); + } } /* ************************************************************************* */ @@ -81,6 +88,22 @@ void BayesNet::saveGraph(const std::string& filename, of.close(); } +/* ************************************************************************* */ +template +double BayesNet::logProbability(const HybridValues& x) const { + double sum = 0.; + for (const auto& gc : *this) { + if (gc) sum += gc->logProbability(x); + } + return sum; +} + +/* ************************************************************************* */ +template +double BayesNet::evaluate(const HybridValues& x) const { + return exp(-logProbability(x)); +} + /* ************************************************************************* */ } // namespace gtsam diff --git a/gtsam/inference/BayesNet.h b/gtsam/inference/BayesNet.h index 1b118adc75..3e6d552810 100644 --- a/gtsam/inference/BayesNet.h 
+++ b/gtsam/inference/BayesNet.h @@ -25,6 +25,8 @@ namespace gtsam { +class HybridValues; + /** * A BayesNet is a tree of conditionals, stored in elimination order. * @ingroup inference @@ -39,7 +41,7 @@ class BayesNet : public FactorGraph { sharedConditional; ///< A shared pointer to a conditional protected: - /// @name Standard Constructors + /// @name Protected Constructors /// @{ /** Default constructor as an empty BayesNet */ @@ -50,6 +52,14 @@ class BayesNet : public FactorGraph { BayesNet(ITERATOR firstConditional, ITERATOR lastConditional) : Base(firstConditional, lastConditional) {} + /** + * Constructor that takes an initializer list of shared pointers. + * BayesNet bn = {make_shared(), + * ...}; + */ + BayesNet(std::initializer_list conditionals) + : Base(conditionals) {} + /// @} public: @@ -62,7 +72,6 @@ class BayesNet : public FactorGraph { const KeyFormatter& formatter = DefaultKeyFormatter) const override; /// @} - /// @name Graph Display /// @{ @@ -80,6 +89,16 @@ class BayesNet : public FactorGraph { const KeyFormatter& keyFormatter = DefaultKeyFormatter, const DotWriter& writer = DotWriter()) const; + /// @} + /// @name HybridValues methods + /// @{ + + // Expose HybridValues version of logProbability. + double logProbability(const HybridValues& x) const; + + // Expose HybridValues version of evaluate. 
+ double evaluate(const HybridValues& c) const; + /// @} }; diff --git a/gtsam/inference/BayesTree-inst.h b/gtsam/inference/BayesTree-inst.h index 82899b299a..055971a82b 100644 --- a/gtsam/inference/BayesTree-inst.h +++ b/gtsam/inference/BayesTree-inst.h @@ -26,11 +26,8 @@ #include #include -#include #include -using boost::assign::cref_list_of; - namespace gtsam { /* ************************************************************************* */ @@ -281,8 +278,8 @@ namespace gtsam { FactorGraphType cliqueMarginal = clique->marginal2(function); // Now, marginalize out everything that is not variable j - BayesNetType marginalBN = *cliqueMarginal.marginalMultifrontalBayesNet( - Ordering(cref_list_of<1,Key>(j)), function); + BayesNetType marginalBN = + *cliqueMarginal.marginalMultifrontalBayesNet(Ordering{j}, function); // The Bayes net should contain only one conditional for variable j, so return it return marginalBN.front(); @@ -403,7 +400,7 @@ namespace gtsam { } // now, marginalize out everything that is not variable j1 or j2 - return p_BC1C2.marginalMultifrontalBayesNet(Ordering(cref_list_of<2,Key>(j1)(j2)), function); + return p_BC1C2.marginalMultifrontalBayesNet(Ordering{j1, j2}, function); } /* ************************************************************************* */ diff --git a/gtsam/inference/BayesTreeCliqueBase-inst.h b/gtsam/inference/BayesTreeCliqueBase-inst.h index dfcc7c318e..6da7d0fe4a 100644 --- a/gtsam/inference/BayesTreeCliqueBase-inst.h +++ b/gtsam/inference/BayesTreeCliqueBase-inst.h @@ -17,6 +17,7 @@ #pragma once #include +#include #include namespace gtsam { diff --git a/gtsam/inference/Conditional-inst.h b/gtsam/inference/Conditional-inst.h index 9879a582c5..9377e8cc49 100644 --- a/gtsam/inference/Conditional-inst.h +++ b/gtsam/inference/Conditional-inst.h @@ -18,30 +18,81 @@ // \callgraph #pragma once -#include - #include +#include +#include + namespace gtsam { - /* ************************************************************************* */ - 
template - void Conditional::print(const std::string& s, const KeyFormatter& formatter) const { - std::cout << s << " P("; - for(Key key: frontals()) - std::cout << " " << formatter(key); - if (nrParents() > 0) - std::cout << " |"; - for(Key parent: parents()) - std::cout << " " << formatter(parent); - std::cout << ")" << std::endl; - } - - /* ************************************************************************* */ - template - bool Conditional::equals(const This& c, double tol) const - { - return nrFrontals_ == c.nrFrontals_; - } +/* ************************************************************************* */ +template +void Conditional::print( + const std::string& s, const KeyFormatter& formatter) const { + std::cout << s << " P("; + for (Key key : frontals()) std::cout << " " << formatter(key); + if (nrParents() > 0) std::cout << " |"; + for (Key parent : parents()) std::cout << " " << formatter(parent); + std::cout << ")" << std::endl; +} + +/* ************************************************************************* */ +template +bool Conditional::equals(const This& c, + double tol) const { + return nrFrontals_ == c.nrFrontals_; +} + +/* ************************************************************************* */ +template +double Conditional::logProbability( + const HybridValues& c) const { + throw std::runtime_error("Conditional::logProbability is not implemented"); +} + +/* ************************************************************************* */ +template +double Conditional::evaluate( + const HybridValues& c) const { + throw std::runtime_error("Conditional::evaluate is not implemented"); +} + +/* ************************************************************************* */ +template +double Conditional::logNormalizationConstant() + const { + throw std::runtime_error( + "Conditional::logNormalizationConstant is not implemented"); +} + +/* ************************************************************************* */ +template +double 
Conditional::normalizationConstant() const { + return std::exp(logNormalizationConstant()); +} +/* ************************************************************************* */ +template +template +bool Conditional::CheckInvariants( + const DERIVEDCONDITIONAL& conditional, const VALUES& values) { + const double prob_or_density = conditional.evaluate(values); + if (prob_or_density < 0.0) return false; // prob_or_density is negative. + if (std::abs(prob_or_density - conditional(values)) > 1e-9) + return false; // operator and evaluate differ + const double logProb = conditional.logProbability(values); + if (std::abs(prob_or_density - std::exp(logProb)) > 1e-9) + return false; // logProb is not consistent with prob_or_density + if (std::abs(conditional.logNormalizationConstant() - + std::log(conditional.normalizationConstant())) > 1e-9) + return false; // log normalization constant is not consistent with + // normalization constant + const double error = conditional.error(values); + if (error < 0.0) return false; // prob_or_density is negative. + const double expected = conditional.logNormalizationConstant() - error; + if (std::abs(logProb - expected) > 1e-9) + return false; // logProb is not consistent with error + return true; } + +} // namespace gtsam diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h index 7594da78d0..ba7b6897e4 100644 --- a/gtsam/inference/Conditional.h +++ b/gtsam/inference/Conditional.h @@ -24,13 +24,39 @@ namespace gtsam { + class HybridValues; // forward declaration. + /** - * Base class for conditional densities. This class iterators and - * access to the frontal and separator keys. + * This is the base class for all conditional distributions/densities, + * which are implemented as specialized factors. This class does not store any + * data other than its keys. Derived classes store data such as matrices and + * probability tables. 
+ * + * The `evaluate` method is used to evaluate the factor, and together with + * `logProbability` is the main methods that need to be implemented in derived + * classes. These two methods relate to the `error` method in the factor by: + * probability(x) = k exp(-error(x)) + * where k is a normalization constant making \int probability(x) == 1.0, and + * logProbability(x) = K - error(x) + * i.e., K = log(K). The normalization constant K is assumed to *not* depend + * on any argument, only (possibly) on the conditional parameters. + * This class provides a default logNormalizationConstant() == 0.0. + * + * There are four broad classes of conditionals that derive from Conditional: + * + * - \b Gaussian conditionals, implemented in \class GaussianConditional, a + * Gaussian density over a set of continuous variables. + * - \b Discrete conditionals, implemented in \class DiscreteConditional, which + * represent a discrete conditional distribution over discrete variables. + * - \b Hybrid conditional densities, such as \class GaussianMixture, which is + * a density over continuous variables given discrete/continuous parents. + * - \b Symbolic factors, used to represent a graph structure, implemented in + * \class SymbolicConditional. Only used for symbolic elimination etc. * * Derived classes *must* redefine the Factor and shared_ptr typedefs to refer * to the associated factor type and shared_ptr type of the derived class. See * SymbolicConditional and GaussianConditional for examples. 
+ * * \nosubgrouping */ template @@ -78,6 +104,8 @@ namespace gtsam { /// @name Standard Interface /// @{ + virtual ~Conditional() {} + /** return the number of frontals */ size_t nrFrontals() const { return nrFrontals_; } @@ -98,6 +126,36 @@ namespace gtsam { /** return a view of the parent keys */ Parents parents() const { return boost::make_iterator_range(beginParents(), endParents()); } + /** + * All conditional types need to implement a `logProbability` function, for which + * exp(logProbability(x)) = evaluate(x). + */ + virtual double logProbability(const HybridValues& c) const; + + /** + * All conditional types need to implement an `evaluate` function, that yields + * a true probability. The default implementation just exponentiates logProbability. + */ + virtual double evaluate(const HybridValues& c) const; + + /// Evaluate probability density, sugar. + double operator()(const HybridValues& x) const { + return evaluate(x); + } + + /** + * All conditional types need to implement a log normalization constant to + * make it such that error>=0. + */ + virtual double logNormalizationConstant() const; + + /** Non-virtual, exponentiate logNormalizationConstant. */ + double normalizationConstant() const; + + /// @} + /// @name Advanced Interface + /// @{ + /** Iterator pointing to first frontal key. */ typename FACTOR::const_iterator beginFrontals() const { return asFactor().begin(); } @@ -110,10 +168,6 @@ namespace gtsam { /** Iterator pointing past the last parent key. */ typename FACTOR::const_iterator endParents() const { return asFactor().end(); } - /// @} - /// @name Advanced Interface - /// @{ - /** Mutable version of nrFrontals */ size_t& nrFrontals() { return nrFrontals_; } @@ -129,7 +183,30 @@ namespace gtsam { /** Mutable iterator pointing past the last parent key. */ typename FACTOR::iterator endParents() { return asFactor().end(); } + /** + * Check invariants of this conditional, given the values `x`. 
+ * It tests: + * - evaluate >= 0.0 + * - evaluate(x) == conditional(x) + * - exp(logProbability(x)) == evaluate(x) + * - logNormalizationConstant() = log(normalizationConstant()) + * - error >= 0.0 + * - logProbability(x) == logNormalizationConstant() - error(x) + * + * @param conditional The conditional to test, as a reference to the derived type. + * @tparam VALUES HybridValues, or a more narrow type like DiscreteValues. + */ + template + static bool CheckInvariants(const DERIVEDCONDITIONAL& conditional, + const VALUES& x); + + /// @} + private: + + /// @name Serialization + /// @{ + // Cast to factor type (non-const) (casts down to derived conditional type, then up to factor type) FACTOR& asFactor() { return static_cast(static_cast(*this)); } diff --git a/gtsam/inference/EliminateableFactorGraph-inst.h b/gtsam/inference/EliminateableFactorGraph-inst.h index 35e7505c95..bebce14cd2 100644 --- a/gtsam/inference/EliminateableFactorGraph-inst.h +++ b/gtsam/inference/EliminateableFactorGraph-inst.h @@ -44,9 +44,16 @@ namespace gtsam { if (orderingType == Ordering::METIS) { Ordering computedOrdering = Ordering::Metis(asDerived()); return eliminateSequential(computedOrdering, function, variableIndex); - } else { + } else if (orderingType == Ordering::COLAMD) { Ordering computedOrdering = Ordering::Colamd(*variableIndex); return eliminateSequential(computedOrdering, function, variableIndex); + } else if (orderingType == Ordering::NATURAL) { + Ordering computedOrdering = Ordering::Natural(asDerived()); + return eliminateSequential(computedOrdering, function, variableIndex); + } else { + Ordering computedOrdering = EliminationTraitsType::DefaultOrderingFunc( + asDerived(), variableIndex); + return eliminateSequential(computedOrdering, function, variableIndex); } } } @@ -100,9 +107,16 @@ namespace gtsam { if (orderingType == Ordering::METIS) { Ordering computedOrdering = Ordering::Metis(asDerived()); return eliminateMultifrontal(computedOrdering, function, variableIndex); 
- } else { + } else if (orderingType == Ordering::COLAMD) { Ordering computedOrdering = Ordering::Colamd(*variableIndex); return eliminateMultifrontal(computedOrdering, function, variableIndex); + } else if (orderingType == Ordering::NATURAL) { + Ordering computedOrdering = Ordering::Natural(asDerived()); + return eliminateMultifrontal(computedOrdering, function, variableIndex); + } else { + Ordering computedOrdering = EliminationTraitsType::DefaultOrderingFunc( + asDerived(), variableIndex); + return eliminateMultifrontal(computedOrdering, function, variableIndex); } } } diff --git a/gtsam/inference/Factor.cpp b/gtsam/inference/Factor.cpp index 6fe96c7771..2590d7b59a 100644 --- a/gtsam/inference/Factor.cpp +++ b/gtsam/inference/Factor.cpp @@ -43,4 +43,10 @@ namespace gtsam { return keys_ == other.keys_; } + /* ************************************************************************* */ + double Factor::error(const HybridValues& c) const { + throw std::runtime_error("Factor::error is not implemented"); + } + + } diff --git a/gtsam/inference/Factor.h b/gtsam/inference/Factor.h index 27b85ef67c..f59a5972d1 100644 --- a/gtsam/inference/Factor.h +++ b/gtsam/inference/Factor.h @@ -29,27 +29,39 @@ #include namespace gtsam { -/// Define collection types: -typedef FastVector FactorIndices; -typedef FastSet FactorIndexSet; + + /// Define collection types: + typedef FastVector FactorIndices; + typedef FastSet FactorIndexSet; + + class HybridValues; // forward declaration of a Value type for error. /** - * This is the base class for all factor types. It is templated on a KEY type, - * which will be the type used to label variables. Key types currently in use - * in gtsam are Index with symbolic (IndexFactor, SymbolicFactorGraph) and - * Gaussian factors (GaussianFactor, JacobianFactor, HessianFactor, GaussianFactorGraph), - * and Key with nonlinear factors (NonlinearFactor, NonlinearFactorGraph). 
- * though currently only IndexFactor and IndexConditional derive from this - * class, using Index keys. This class does not store any data other than its - * keys. Derived classes store data such as matrices and probability tables. + * This is the base class for all factor types, as well as conditionals, + * which are implemented as specialized factors. This class does not store any + * data other than its keys. Derived classes store data such as matrices and + * probability tables. + * + * The `error` method is used to evaluate the factor, and is the only method + * that is required to be implemented in derived classes, although it has a + * default implementation that throws an exception. + * + * There are five broad classes of factors that derive from Factor: * - * Note that derived classes *must* redefine the ConditionalType and shared_ptr - * typedefs to refer to the associated conditional and shared_ptr types of the - * derived class. See IndexFactor, JacobianFactor, etc. for examples. + * - \b Nonlinear factors, such as \class NonlinearFactor and \class NoiseModelFactor, which + * represent a nonlinear likelihood function over a set of variables. + * - \b Gaussian factors, such as \class JacobianFactor and \class HessianFactor, which + * represent a Gaussian likelihood over a set of variables. + * - \b Discrete factors, such as \class DiscreteFactor and \class DecisionTreeFactor, which + * represent a discrete distribution over a set of variables. + * - \b Hybrid factors, such as \class HybridFactor, which represent a mixture of + * Gaussian and discrete distributions over a set of variables. + * - \b Symbolic factors, used to represent a graph structure, such as + * \class SymbolicFactor, only used for symbolic elimination etc. * - * This class is \b not virtual for performance reasons - derived symbolic classes, - * IndexFactor and IndexConditional, need to be created and destroyed quickly - * during symbolic elimination. 
GaussianFactor and NonlinearFactor are virtual. + * Note that derived classes must also redefine the `This` and `shared_ptr` + * typedefs. See JacobianFactor, etc. for examples. + * * \nosubgrouping */ class GTSAM_EXPORT Factor @@ -133,6 +145,12 @@ typedef FastSet FactorIndexSet; /** Iterator at end of involved variable keys */ const_iterator end() const { return keys_.end(); } + /** + * All factor types need to implement an error function. + * In factor graphs, this is the negative log-likelihood. + */ + virtual double error(const HybridValues& c) const; + /** * @return the number of variables involved in this factor */ @@ -157,7 +175,6 @@ typedef FastSet FactorIndexSet; bool equals(const This& other, double tol = 1e-9) const; /// @} - /// @name Advanced Interface /// @{ @@ -170,7 +187,13 @@ typedef FastSet FactorIndexSet; /** Iterator at end of involved variable keys */ iterator end() { return keys_.end(); } + /// @} + private: + + /// @name Serialization + /// @{ + /** Serialization function */ friend class boost::serialization::access; template @@ -182,4 +205,4 @@ typedef FastSet FactorIndexSet; }; -} +} // \namespace gtsam diff --git a/gtsam/inference/FactorGraph-inst.h b/gtsam/inference/FactorGraph-inst.h index d6c1d5d8ae..355fdf87ec 100644 --- a/gtsam/inference/FactorGraph-inst.h +++ b/gtsam/inference/FactorGraph-inst.h @@ -61,6 +61,16 @@ bool FactorGraph::equals(const This& fg, double tol) const { return true; } +/* ************************************************************************ */ +template +double FactorGraph::error(const HybridValues &values) const { + double error = 0.0; + for (auto &f : factors_) { + error += f->error(values); + } + return error; +} + /* ************************************************************************* */ template size_t FactorGraph::nrFactors() const { diff --git a/gtsam/inference/FactorGraph.h b/gtsam/inference/FactorGraph.h index 89fd09037c..2e9dd3d532 100644 --- a/gtsam/inference/FactorGraph.h +++ 
b/gtsam/inference/FactorGraph.h @@ -47,6 +47,8 @@ typedef FastVector FactorIndices; template class BayesTree; +class HybridValues; + /** Helper */ template class CRefCallPushBack { @@ -154,10 +156,22 @@ class FactorGraph { /// @} public: + /// @name Constructors + /// @{ + /// Default destructor - // Public and virtual so boost serialization can call it. + /// Public and virtual so boost serialization can call it. virtual ~FactorGraph() = default; + /** + * Constructor that takes an initializer list of shared pointers. + * FactorGraph fg = {make_shared(), ...}; + */ + template > + FactorGraph(std::initializer_list> sharedFactors) + : factors_(sharedFactors) {} + + /// @} /// @name Adding Single Factors /// @{ @@ -347,6 +361,9 @@ class FactorGraph { /** Get the last factor */ sharedFactor back() const { return factors_.back(); } + /** Add error for all factors. */ + double error(const HybridValues &values) const; + /// @} /// @name Modifying Factor Graphs (imperative, discouraged) /// @{ diff --git a/gtsam/inference/Ordering.h b/gtsam/inference/Ordering.h index 97d5bf110f..e875ed961e 100644 --- a/gtsam/inference/Ordering.h +++ b/gtsam/inference/Ordering.h @@ -50,18 +50,14 @@ class Ordering: public KeyVector { Ordering() { } + using KeyVector::KeyVector; // Inherit the KeyVector's constructors + /// Create from a container template explicit Ordering(const KEYS& keys) : Base(keys.begin(), keys.end()) { } - /// Create an ordering using iterators over keys - template - Ordering(ITERATOR firstKey, ITERATOR lastKey) : - Base(firstKey, lastKey) { - } - /// Add new variables to the ordering as ordering += key1, key2, ... Equivalent to calling /// push_back. boost::assign::list_inserter > operator+=( @@ -73,7 +69,7 @@ class Ordering: public KeyVector { /** * @brief Append new keys to the ordering as `ordering += keys`. * - * @param key + * @param keys The key vector to append to this ordering. * @return The ordering variable with appended keys. 
*/ This& operator+=(KeyVector& keys); @@ -195,7 +191,7 @@ class Ordering: public KeyVector { KeySet src = fg.keys(); KeyVector keys(src.begin(), src.end()); std::stable_sort(keys.begin(), keys.end()); - return Ordering(keys); + return Ordering(keys.begin(), keys.end()); } /// METIS Formatting function diff --git a/gtsam/inference/tests/testKey.cpp b/gtsam/inference/tests/testKey.cpp index 98c5d36bf8..59245a53a7 100644 --- a/gtsam/inference/tests/testKey.cpp +++ b/gtsam/inference/tests/testKey.cpp @@ -21,11 +21,8 @@ #include -#include // for operator += - #include -using namespace boost::assign; using namespace std; using namespace gtsam; diff --git a/gtsam/inference/tests/testLabeledSymbol.cpp b/gtsam/inference/tests/testLabeledSymbol.cpp index 4ac171c73d..f63b438b45 100644 --- a/gtsam/inference/tests/testLabeledSymbol.cpp +++ b/gtsam/inference/tests/testLabeledSymbol.cpp @@ -19,9 +19,7 @@ #include #include -#include // for operator += -using namespace boost::assign; using namespace std; using namespace gtsam; diff --git a/gtsam/inference/tests/testOrdering.cpp b/gtsam/inference/tests/testOrdering.cpp index 1afa1cfde8..761c330b46 100644 --- a/gtsam/inference/tests/testOrdering.cpp +++ b/gtsam/inference/tests/testOrdering.cpp @@ -22,11 +22,8 @@ #include #include -#include - using namespace std; using namespace gtsam; -using namespace boost::assign; namespace example { SymbolicFactorGraph symbolicChain() { @@ -47,33 +44,33 @@ TEST(Ordering, constrained_ordering) { // unconstrained version { Ordering actual = Ordering::Colamd(symbolicGraph); - Ordering expected = Ordering(list_of(0)(1)(2)(3)(4)(5)); + Ordering expected{0, 1, 2, 3, 4, 5}; EXPECT(assert_equal(expected, actual)); } // constrained version - push one set to the end { - Ordering actual = Ordering::ColamdConstrainedLast(symbolicGraph, list_of(2)(4)); - Ordering expected = Ordering(list_of(0)(1)(5)(3)(4)(2)); - EXPECT(assert_equal(expected, actual)); + Ordering actual = 
Ordering::ColamdConstrainedLast(symbolicGraph, {2, 4}); + Ordering expected = Ordering({0, 1, 5, 3, 4, 2}); + EXPECT(assert_equal(expected, actual)); } // constrained version - push one set to the start { - Ordering actual = Ordering::ColamdConstrainedFirst(symbolicGraph, list_of(2)(4)); - Ordering expected = Ordering(list_of(2)(4)(0)(1)(3)(5)); + Ordering actual = Ordering::ColamdConstrainedFirst(symbolicGraph, {2, 4}); + Ordering expected = Ordering({2, 4, 0, 1, 3, 5}); EXPECT(assert_equal(expected, actual)); } // Make sure giving empty constraints does not break the code { Ordering actual = Ordering::ColamdConstrainedLast(symbolicGraph, {}); - Ordering expected = Ordering(list_of(0)(1)(2)(3)(4)(5)); + Ordering expected = Ordering({0, 1, 2, 3, 4, 5}); EXPECT(assert_equal(expected, actual)); } { Ordering actual = Ordering::ColamdConstrainedFirst(symbolicGraph, {}); - Ordering expected = Ordering(list_of(0)(1)(2)(3)(4)(5)); + Ordering expected = Ordering({0, 1, 2, 3, 4, 5}); EXPECT(assert_equal(expected, actual)); } @@ -81,11 +78,11 @@ TEST(Ordering, constrained_ordering) { SymbolicFactorGraph emptyGraph; Ordering empty; { - Ordering actual = Ordering::ColamdConstrainedLast(emptyGraph, list_of(2)(4)); + Ordering actual = Ordering::ColamdConstrainedLast(emptyGraph, {2, 4}); EXPECT(assert_equal(empty, actual)); } { - Ordering actual = Ordering::ColamdConstrainedFirst(emptyGraph, list_of(2)(4)); + Ordering actual = Ordering::ColamdConstrainedFirst(emptyGraph, {2, 4}); EXPECT(assert_equal(empty, actual)); } } @@ -105,7 +102,7 @@ TEST(Ordering, grouped_constrained_ordering) { constraints[5] = 2; Ordering actual = Ordering::ColamdConstrained(symbolicGraph, constraints); - Ordering expected = list_of(0)(1)(3)(2)(4)(5); + Ordering expected{0, 1, 3, 2, 4, 5}; EXPECT(assert_equal(expected, actual)); } @@ -139,9 +136,11 @@ TEST(Ordering, csr_format) { MetisIndex mi(symbolicGraph); - vector xadjExpected, adjExpected; - xadjExpected += 0, 2, 5, 8, 11, 13, 16, 20, 24, 28, 31, 
33, 36, 39, 42, 44; - adjExpected += 1, 5, 0, 2, 6, 1, 3, 7, 2, 4, 8, 3, 9, 0, 6, 10, 1, 5, 7, 11, 2, 6, 8, 12, 3, 7, 9, 13, 4, 8, 14, 5, 11, 6, 10, 12, 7, 11, 13, 8, 12, 14, 9, 13; + const vector xadjExpected{0, 2, 5, 8, 11, 13, 16, 20, + 24, 28, 31, 33, 36, 39, 42, 44}, + adjExpected{1, 5, 0, 2, 6, 1, 3, 7, 2, 4, 8, 3, 9, 0, 6, + 10, 1, 5, 7, 11, 2, 6, 8, 12, 3, 7, 9, 13, 4, 8, + 14, 5, 11, 6, 10, 12, 7, 11, 13, 8, 12, 14, 9, 13}; EXPECT(xadjExpected == mi.xadj()); EXPECT(adjExpected.size() == mi.adj().size()); @@ -161,9 +160,8 @@ TEST(Ordering, csr_format_2) { MetisIndex mi(symbolicGraph); - vector xadjExpected, adjExpected; - xadjExpected += 0, 1, 4, 6, 8, 10; - adjExpected += 1, 0, 2, 4, 1, 3, 2, 4, 1, 3; + const std::vector xadjExpected{0, 1, 4, 6, 8, 10}, + adjExpected{1, 0, 2, 4, 1, 3, 2, 4, 1, 3}; EXPECT(xadjExpected == mi.xadj()); EXPECT(adjExpected.size() == mi.adj().size()); @@ -183,9 +181,8 @@ TEST(Ordering, csr_format_3) { MetisIndex mi(symbolicGraph); - vector xadjExpected, adjExpected; - xadjExpected += 0, 1, 4, 6, 8, 10; - adjExpected += 1, 0, 2, 4, 1, 3, 2, 4, 1, 3; + const std::vector xadjExpected{0, 1, 4, 6, 8, 10}, + adjExpected{1, 0, 2, 4, 1, 3, 2, 4, 1, 3}; //size_t minKey = mi.minKey(); vector adjAcutal = mi.adj(); @@ -202,24 +199,18 @@ TEST(Ordering, csr_format_3) { /* ************************************************************************* */ TEST(Ordering, AppendVector) { using symbol_shorthand::X; + KeyVector keys{X(0), X(1), X(2)}; Ordering actual; - KeyVector keys = {X(0), X(1), X(2)}; actual += keys; - Ordering expected; - expected += X(0); - expected += X(1); - expected += X(2); + Ordering expected{X(0), X(1), X(2)}; EXPECT(assert_equal(expected, actual)); } /* ************************************************************************* */ TEST(Ordering, Contains) { using symbol_shorthand::X; - Ordering ordering; - ordering += X(0); - ordering += X(1); - ordering += X(2); + Ordering ordering{X(0), X(1), X(2)}; 
EXPECT(ordering.contains(X(1))); EXPECT(!ordering.contains(X(4))); @@ -239,9 +230,8 @@ TEST(Ordering, csr_format_4) { MetisIndex mi(symbolicGraph); - vector xadjExpected, adjExpected; - xadjExpected += 0, 1, 3, 5, 7, 9, 10; - adjExpected += 1, 0, 2, 1, 3, 2, 4, 3, 5, 4; + const vector xadjExpected{0, 1, 3, 5, 7, 9, 10}, + adjExpected{1, 0, 2, 1, 3, 2, 4, 3, 5, 4}; vector adjAcutal = mi.adj(); vector xadjActual = mi.xadj(); @@ -274,9 +264,7 @@ TEST(Ordering, metis) { MetisIndex mi(symbolicGraph); - vector xadjExpected, adjExpected; - xadjExpected += 0, 1, 3, 4; - adjExpected += 1, 0, 2, 1; + const vector xadjExpected{0, 1, 3, 4}, adjExpected{1, 0, 2, 1}; EXPECT(xadjExpected == mi.xadj()); EXPECT(adjExpected.size() == mi.adj().size()); @@ -303,7 +291,7 @@ TEST(Ordering, MetisLoop) { // | - P( 4 | 0 3) // | | - P( 5 | 0 4) // | - P( 2 | 1 3) - Ordering expected = Ordering(list_of(5)(4)(2)(1)(0)(3)); + Ordering expected = Ordering({5, 4, 2, 1, 0, 3}); EXPECT(assert_equal(expected, actual)); } #elif defined(_WIN32) @@ -313,7 +301,7 @@ TEST(Ordering, MetisLoop) { // | - P( 3 | 5 2) // | | - P( 4 | 5 3) // | - P( 1 | 0 2) - Ordering expected = Ordering(list_of(4)(3)(1)(0)(5)(2)); + Ordering expected = Ordering({4, 3, 1, 0, 5, 2}); EXPECT(assert_equal(expected, actual)); } #else @@ -323,7 +311,7 @@ TEST(Ordering, MetisLoop) { // | - P( 2 | 4 1) // | | - P( 3 | 4 2) // | - P( 5 | 0 1) - Ordering expected = Ordering(list_of(3)(2)(5)(0)(4)(1)); + Ordering expected = Ordering({3, 2, 5, 0, 4, 1}); EXPECT(assert_equal(expected, actual)); } #endif @@ -347,7 +335,7 @@ TEST(Ordering, MetisSingleNode) { symbolicGraph.push_factor(7); Ordering actual = Ordering::Create(Ordering::METIS, symbolicGraph); - Ordering expected = Ordering(list_of(7)); + Ordering expected = Ordering({7}); EXPECT(assert_equal(expected, actual)); } #endif @@ -365,7 +353,7 @@ TEST(Ordering, Create) { //| | | - P( 1 | 2) //| | | | - P( 0 | 1) Ordering actual = Ordering::Create(Ordering::COLAMD, symbolicGraph); - 
Ordering expected = Ordering(list_of(0)(1)(2)(3)(4)(5)); + Ordering expected = Ordering({0, 1, 2, 3, 4, 5}); EXPECT(assert_equal(expected, actual)); } @@ -376,7 +364,7 @@ TEST(Ordering, Create) { //- P( 1 0 2) //| - P( 3 4 | 2) //| | - P( 5 | 4) - Ordering expected = Ordering(list_of(5)(3)(4)(1)(0)(2)); + Ordering expected = Ordering({5, 3, 4, 1, 0, 2}); EXPECT(assert_equal(expected, actual)); } #endif diff --git a/gtsam/inference/tests/testVariableSlots.cpp b/gtsam/inference/tests/testVariableSlots.cpp index a7a0945e79..b030331c14 100644 --- a/gtsam/inference/tests/testVariableSlots.cpp +++ b/gtsam/inference/tests/testVariableSlots.cpp @@ -22,11 +22,8 @@ #include #include -#include - using namespace gtsam; using namespace std; -using namespace boost::assign; /* ************************************************************************* */ TEST(VariableSlots, constructor) { @@ -41,12 +38,12 @@ TEST(VariableSlots, constructor) { static const size_t none = numeric_limits::max(); VariableSlots expected((SymbolicFactorGraph())); - expected[0] += none, 0, 0, none; - expected[1] += none, 1, none, none; - expected[2] += 0, none, 1, none; - expected[3] += 1, none, none, none; - expected[5] += none, none, none, 0; - expected[9] += none, none, none, 1; + expected[0] = {none, 0, 0, none}; + expected[1] = {none, 1, none, none}; + expected[2] = {0, none, 1, none}; + expected[3] = {1, none, none, none}; + expected[5] = {none, none, none, 0}; + expected[9] = {none, none, none, 1}; CHECK(assert_equal(expected, actual)); } diff --git a/gtsam/linear/Errors.cpp b/gtsam/linear/Errors.cpp index 41c6c3d09e..92f7bb4b81 100644 --- a/gtsam/linear/Errors.cpp +++ b/gtsam/linear/Errors.cpp @@ -26,19 +26,18 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -Errors::Errors(){} - -/* ************************************************************************* */ -Errors::Errors(const VectorValues& V) { - for(const Vector& e: V | 
boost::adaptors::map_values) { - push_back(e); +Errors createErrors(const VectorValues& V) { + Errors result; + for (const Vector& e : V | boost::adaptors::map_values) { + result.push_back(e); } + return result; } /* ************************************************************************* */ -void Errors::print(const std::string& s) const { +void print(const Errors& e, const string& s) { cout << s << endl; - for(const Vector& v: *this) + for(const Vector& v: e) gtsam::print(v); } @@ -51,50 +50,49 @@ struct equalsVector : public std::function { } }; -bool Errors::equals(const Errors& expected, double tol) const { - if( size() != expected.size() ) return false; - return equal(begin(),end(),expected.begin(),equalsVector(tol)); +bool equality(const Errors& actual, const Errors& expected, double tol) { + if (actual.size() != expected.size()) return false; + return equal(actual.begin(), actual.end(), expected.begin(), + equalsVector(tol)); } /* ************************************************************************* */ -Errors Errors::operator+(const Errors& b) const { +Errors operator+(const Errors& a, const Errors& b) { #ifndef NDEBUG - size_t m = size(); + size_t m = a.size(); if (b.size()!=m) throw(std::invalid_argument("Errors::operator+: incompatible sizes")); #endif Errors result; Errors::const_iterator it = b.begin(); - for(const Vector& ai: *this) + for(const Vector& ai: a) result.push_back(ai + *(it++)); return result; } /* ************************************************************************* */ -Errors Errors::operator-(const Errors& b) const { +Errors operator-(const Errors& a, const Errors& b) { #ifndef NDEBUG - size_t m = size(); + size_t m = a.size(); if (b.size()!=m) throw(std::invalid_argument("Errors::operator-: incompatible sizes")); #endif Errors result; Errors::const_iterator it = b.begin(); - for(const Vector& ai: *this) + for(const Vector& ai: a) result.push_back(ai - *(it++)); return result; } /* 
************************************************************************* */ -Errors Errors::operator-() const { +Errors operator-(const Errors& a) { Errors result; - for(const Vector& ai: *this) + for(const Vector& ai: a) result.push_back(-ai); return result; } - - /* ************************************************************************* */ double dot(const Errors& a, const Errors& b) { #ifndef NDEBUG @@ -105,7 +103,7 @@ double dot(const Errors& a, const Errors& b) { double result = 0.0; Errors::const_iterator it = b.begin(); for(const Vector& ai: a) - result += gtsam::dot(ai, *(it++)); + result += gtsam::dot(ai, *(it++)); return result; } @@ -116,11 +114,6 @@ void axpy(double alpha, const Errors& x, Errors& y) { yi += alpha * (*(it++)); } -/* ************************************************************************* */ -void print(const Errors& a, const string& s) { - a.print(s); -} - /* ************************************************************************* */ } // gtsam diff --git a/gtsam/linear/Errors.h b/gtsam/linear/Errors.h index f6e147084b..faf83d4d11 100644 --- a/gtsam/linear/Errors.h +++ b/gtsam/linear/Errors.h @@ -20,59 +20,54 @@ #pragma once #include -#include #include +#include #include namespace gtsam { - // Forward declarations - class VectorValues; - - /** vector of errors */ - class Errors : public FastList { - - public: - - GTSAM_EXPORT Errors() ; - - /** break V into pieces according to its start indices */ - GTSAM_EXPORT Errors(const VectorValues&V); +// Forward declarations +class VectorValues; - /** print */ - GTSAM_EXPORT void print(const std::string& s = "Errors") const; +/// Errors is a vector of errors. +using Errors = FastList; - /** equals, for unit testing */ - GTSAM_EXPORT bool equals(const Errors& expected, double tol=1e-9) const; +/// Break V into pieces according to its start indices. 
+GTSAM_EXPORT Errors createErrors(const VectorValues& V); - /** Addition */ - GTSAM_EXPORT Errors operator+(const Errors& b) const; +/// Print an Errors instance. +GTSAM_EXPORT void print(const Errors& e, const std::string& s = "Errors"); - /** subtraction */ - GTSAM_EXPORT Errors operator-(const Errors& b) const; +// Check equality for unit testing. +GTSAM_EXPORT bool equality(const Errors& actual, const Errors& expected, + double tol = 1e-9); - /** negation */ - GTSAM_EXPORT Errors operator-() const ; +/// Addition. +GTSAM_EXPORT Errors operator+(const Errors& a, const Errors& b); - }; // Errors +/// Subtraction. +GTSAM_EXPORT Errors operator-(const Errors& a, const Errors& b); - /** - * dot product - */ - GTSAM_EXPORT double dot(const Errors& a, const Errors& b); +/// Negation. +GTSAM_EXPORT Errors operator-(const Errors& a); - /** - * BLAS level 2 style - */ - GTSAM_EXPORT void axpy(double alpha, const Errors& x, Errors& y); +/// Dot product. +GTSAM_EXPORT double dot(const Errors& a, const Errors& b); - /** print with optional string */ - GTSAM_EXPORT void print(const Errors& a, const std::string& s = "Error"); +/// BLAS level 2 style AXPY, `y := alpha*x + y` +GTSAM_EXPORT void axpy(double alpha, const Errors& x, Errors& y); - /// traits - template<> - struct traits : public Testable { - }; +/// traits +template <> +struct traits { + static void Print(const Errors& e, const std::string& str = "") { + print(e, str); + } + static bool Equals(const Errors& actual, const Errors& expected, + double tol = 1e-8) { + return equality(actual, expected, tol); + } +}; -} //\ namespace gtsam +} // namespace gtsam diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp index 6dcf662a90..2e62d2d42d 100644 --- a/gtsam/linear/GaussianBayesNet.cpp +++ b/gtsam/linear/GaussianBayesNet.cpp @@ -46,7 +46,8 @@ namespace gtsam { return optimize(solution); } - VectorValues GaussianBayesNet::optimize(VectorValues solution) const { + VectorValues 
GaussianBayesNet::optimize(const VectorValues& given) const { + VectorValues solution = given; // (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas) // solve each node in reverse topological sort order (parents first) for (auto cg : boost::adaptors::reverse(*this)) { @@ -64,8 +65,9 @@ namespace gtsam { return sample(result, rng); } - VectorValues GaussianBayesNet::sample(VectorValues result, + VectorValues GaussianBayesNet::sample(const VectorValues& given, std::mt19937_64* rng) const { + VectorValues result(given); // sample each node in reverse topological sort order (parents first) for (auto cg : boost::adaptors::reverse(*this)) { const VectorValues sampled = cg->sample(result, rng); @@ -79,7 +81,7 @@ namespace gtsam { return sample(&kRandomNumberGenerator); } - VectorValues GaussianBayesNet::sample(VectorValues given) const { + VectorValues GaussianBayesNet::sample(const VectorValues& given) const { return sample(given, &kRandomNumberGenerator); } @@ -102,7 +104,25 @@ namespace gtsam { /* ************************************************************************* */ double GaussianBayesNet::error(const VectorValues& x) const { - return GaussianFactorGraph(*this).error(x); + double sum = 0.; + for (const auto& gc : *this) { + if (gc) sum += gc->error(x); + } + return sum; + } + + /* ************************************************************************* */ + double GaussianBayesNet::logProbability(const VectorValues& x) const { + double sum = 0.; + for (const auto& gc : *this) { + if (gc) sum += gc->logProbability(x); + } + return sum; + } + + /* ************************************************************************* */ + double GaussianBayesNet::evaluate(const VectorValues& x) const { + return exp(logProbability(x)); } /* ************************************************************************* */ @@ -217,14 +237,7 @@ namespace gtsam { double GaussianBayesNet::logDeterminant() const { double logDet = 0.0; for (const sharedConditional& cg : *this) { - if 
(cg->get_model()) { - Vector diag = cg->R().diagonal(); - cg->get_model()->whitenInPlace(diag); - logDet += diag.unaryExpr([](double x) { return log(x); }).sum(); - } else { - logDet += - cg->R().diagonal().unaryExpr([](double x) { return log(x); }).sum(); - } + logDet += cg->logDeterminant(); } return logDet; } diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h index 83328576f2..28397a88a7 100644 --- a/gtsam/linear/GaussianBayesNet.h +++ b/gtsam/linear/GaussianBayesNet.h @@ -65,8 +65,17 @@ namespace gtsam { explicit GaussianBayesNet(const FactorGraph& graph) : Base(graph) {} + /** + * Constructor that takes an initializer list of shared pointers. + * BayesNet bn = {make_shared(), ...}; + */ + template + GaussianBayesNet( + std::initializer_list > conditionals) + : Base(conditionals) {} + /// Destructor - virtual ~GaussianBayesNet() {} + virtual ~GaussianBayesNet() = default; /// @} @@ -88,12 +97,30 @@ namespace gtsam { /// @name Standard Interface /// @{ + /// Sum error over all variables. + double error(const VectorValues& x) const; + + /// Sum logProbability over all variables. + double logProbability(const VectorValues& x) const; + + /** + * Calculate probability density for given values `x`: + * exp(logProbability) + * where x is the vector of values. + */ + double evaluate(const VectorValues& x) const; + + /// Evaluate probability density, sugar. + double operator()(const VectorValues& x) const { + return evaluate(x); + } + /// Solve the GaussianBayesNet, i.e. 
return \f$ x = R^{-1}*d \f$, by /// back-substitution VectorValues optimize() const; /// Version of optimize for incomplete BayesNet, given missing variables - VectorValues optimize(const VectorValues given) const; + VectorValues optimize(const VectorValues& given) const; /** * Sample using ancestral sampling @@ -110,13 +137,13 @@ namespace gtsam { * VectorValues given = ...; * auto sample = gbn.sample(given, &rng); */ - VectorValues sample(VectorValues given, std::mt19937_64* rng) const; + VectorValues sample(const VectorValues& given, std::mt19937_64* rng) const; /// Sample using ancestral sampling, use default rng VectorValues sample() const; /// Sample from an incomplete BayesNet, use default rng - VectorValues sample(VectorValues given) const; + VectorValues sample(const VectorValues& given) const; /** * Return ordering corresponding to a topological sort. @@ -187,9 +214,6 @@ namespace gtsam { * allocateVectorValues */ VectorValues gradientAtZero() const; - /** 0.5 * sum of squared Mahalanobis distances. */ - double error(const VectorValues& x) const; - /** * Computes the determinant of a GassianBayesNet. A GaussianBayesNet is an upper triangular * matrix and for an upper triangular matrix determinant is the product of the diagonal @@ -222,6 +246,14 @@ namespace gtsam { VectorValues backSubstituteTranspose(const VectorValues& gx) const; /// @} + /// @name HybridValues methods. + /// @{ + + using Base::evaluate; // Expose evaluate(const HybridValues&) method.. + using Base::logProbability; // Expose logProbability(const HybridValues&) method.. + using Base::error; // Expose error(const HybridValues&) method.. 
+ + /// @} private: /** Serialization function */ diff --git a/gtsam/linear/GaussianBayesTree-inl.h b/gtsam/linear/GaussianBayesTree-inl.h index d781533e67..e734206442 100644 --- a/gtsam/linear/GaussianBayesTree-inl.h +++ b/gtsam/linear/GaussianBayesTree-inl.h @@ -43,7 +43,7 @@ double logDeterminant(const typename BAYESTREE::sharedClique& clique) { double result = 0.0; // this clique - result += clique->conditional()->R().diagonal().unaryExpr(std::ptr_fun(log)).sum(); + result += clique->conditional()->logDeterminant(); // sum of children for(const typename BAYESTREE::sharedClique& child: clique->children_) diff --git a/gtsam/linear/GaussianBayesTree.cpp b/gtsam/linear/GaussianBayesTree.cpp index a83475e26b..b35252e729 100644 --- a/gtsam/linear/GaussianBayesTree.cpp +++ b/gtsam/linear/GaussianBayesTree.cpp @@ -50,15 +50,7 @@ namespace gtsam { const GaussianBayesTreeClique::shared_ptr& clique, LogDeterminantData& parentSum) { auto cg = clique->conditional(); - double logDet; - if (cg->get_model()) { - Vector diag = cg->R().diagonal(); - cg->get_model()->whitenInPlace(diag); - logDet = diag.unaryExpr([](double x) { return log(x); }).sum(); - } else { - logDet = - cg->R().diagonal().unaryExpr([](double x) { return log(x); }).sum(); - } + double logDet = cg->logDeterminant(); // Add the current clique's log-determinant to the overall sum (*parentSum.logDet) += logDet; return parentSum; diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index 9515776416..cb8726d7a4 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #ifdef __GNUC__ @@ -34,6 +35,7 @@ #include #include #include +#include // In Wrappers we have no access to this so have a default ready static std::mt19937_64 kRandomNumberGenerator(42); @@ -63,13 +65,24 @@ namespace gtsam { : BaseFactor(key, R, parent1, S, parent2, T, d, sigmas), BaseConditional(1) {} + /* 
************************************************************************ */ + GaussianConditional GaussianConditional::FromMeanAndStddev(Key key, + const Vector& mu, + double sigma) { + // |Rx - d| = |x - mu|/sigma + const Matrix R = Matrix::Identity(mu.size(), mu.size()); + const Vector& d = mu; + return GaussianConditional(key, d, R, + noiseModel::Isotropic::Sigma(mu.size(), sigma)); + } + /* ************************************************************************ */ GaussianConditional GaussianConditional::FromMeanAndStddev( Key key, const Matrix& A, Key parent, const Vector& b, double sigma) { // |Rx + Sy - d| = |x-(Ay + b)|/sigma const Matrix R = Matrix::Identity(b.size(), b.size()); const Matrix S = -A; - const Vector d = b; + const Vector& d = b; return GaussianConditional(key, d, R, parent, S, noiseModel::Isotropic::Sigma(b.size(), sigma)); } @@ -82,7 +95,7 @@ namespace gtsam { const Matrix R = Matrix::Identity(b.size(), b.size()); const Matrix S = -A1; const Matrix T = -A2; - const Vector d = b; + const Vector& d = b; return GaussianConditional(key, d, R, parent1, S, parent2, T, noiseModel::Isotropic::Sigma(b.size(), sigma)); } @@ -109,6 +122,10 @@ namespace gtsam { << endl; } cout << formatMatrixIndented(" d = ", getb(), true) << "\n"; + if (nrParents() == 0) { + const auto mean = solve({}); // solve for mean. 
+ mean.print(" mean", formatter); + } if (model_) model_->print(" Noise model: "); else @@ -155,6 +172,47 @@ namespace gtsam { } } + /* ************************************************************************* */ + double GaussianConditional::logDeterminant() const { + if (get_model()) { + Vector diag = R().diagonal(); + get_model()->whitenInPlace(diag); + return diag.unaryExpr([](double x) { return log(x); }).sum(); + } else { + return R().diagonal().unaryExpr([](double x) { return log(x); }).sum(); + } + } + + /* ************************************************************************* */ + // normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) + // log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma) + double GaussianConditional::logNormalizationConstant() const { + constexpr double log2pi = 1.8378770664093454835606594728112; + size_t n = d().size(); + // log det(Sigma)) = - 2.0 * logDeterminant() + return - 0.5 * n * log2pi + logDeterminant(); + } + + /* ************************************************************************* */ + // density = k exp(-error(x)) + // log = log(k) - error(x) + double GaussianConditional::logProbability(const VectorValues& x) const { + return logNormalizationConstant() - error(x); + } + + double GaussianConditional::logProbability(const HybridValues& x) const { + return logProbability(x.continuous()); + } + + /* ************************************************************************* */ + double GaussianConditional::evaluate(const VectorValues& x) const { + return exp(logProbability(x)); + } + + double GaussianConditional::evaluate(const HybridValues& x) const { + return evaluate(x.continuous()); + } + /* ************************************************************************* */ VectorValues GaussianConditional::solve(const VectorValues& x) const { // Concatenate all vector values that correspond to parent variables @@ -215,7 +273,7 @@ namespace gtsam { Vector frontalVec = gy.vector(KeyVector(beginFrontals(), endFrontals())); 
frontalVec = R().transpose().triangularView().solve(frontalVec); - // Check for indeterminant solution + // Check for indeterminate solution if (frontalVec.hasNaN()) throw IndeterminantLinearSystemException(this->keys().front()); for (const_iterator it = beginParents(); it!= endParents(); it++) @@ -285,14 +343,12 @@ namespace gtsam { "GaussianConditional::sample can only be called on single variable " "conditionals"); } - if (!model_) { - throw std::invalid_argument( - "GaussianConditional::sample can only be called if a diagonal noise " - "model was specified at construction."); - } + VectorValues solution = solve(parentsValues); Key key = firstFrontalKey(); - const Vector& sigmas = model_->sigmas(); + // The vector of sigma values for sampling. + // If no model, initialize sigmas to 1, else to model sigmas + const Vector& sigmas = (!model_) ? Vector::Ones(rows()) : model_->sigmas(); solution[key] += Sampler::sampleDiagonal(sigmas, rng); return solution; } diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index 4822e3eaf7..4611e30d06 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -34,7 +34,7 @@ namespace gtsam { /** * A GaussianConditional functions as the node in a Bayes network. * It has a set of parents y,z, etc. and implements a probability density on x. - * The negative log-probability is given by \f$ \frac{1}{2} |Rx - (d - Sy - Tz - ...)|^2 \f$ + * The negative log-density is given by \f$ \frac{1}{2} |Rx - (d - Sy - Tz - ...)|^2 \f$ * @ingroup linear */ class GTSAM_EXPORT GaussianConditional : @@ -84,17 +84,28 @@ namespace gtsam { const KEYS& keys, size_t nrFrontals, const VerticalBlockMatrix& augmentedMatrix, const SharedDiagonal& sigmas = SharedDiagonal()); - /// Construct from mean A1 p1 + b and standard deviation. + /// Construct from mean `mu` and standard deviation `sigma`. 
+ static GaussianConditional FromMeanAndStddev(Key key, const Vector& mu, + double sigma); + + /// Construct from conditional mean `A1 p1 + b` and standard deviation. static GaussianConditional FromMeanAndStddev(Key key, const Matrix& A, Key parent, const Vector& b, double sigma); - /// Construct from mean A1 p1 + A2 p2 + b and standard deviation. + /// Construct from conditional mean `A1 p1 + A2 p2 + b` and standard + /// deviation `sigma`. static GaussianConditional FromMeanAndStddev(Key key, // const Matrix& A1, Key parent1, const Matrix& A2, Key parent2, const Vector& b, double sigma); + /// Create shared pointer by forwarding arguments to fromMeanAndStddev. + template + static shared_ptr sharedMeanAndStddev(Args&&... args) { + return boost::make_shared(FromMeanAndStddev(std::forward(args)...)); + } + /** Combine several GaussianConditional into a single dense GC. The conditionals enumerated by * \c first and \c last must be in increasing order, meaning that the parents of any * conditional may not include a conditional coming before it. @@ -121,17 +132,32 @@ namespace gtsam { /// @name Standard Interface /// @{ - /** Return a view of the upper-triangular R block of the conditional */ - constABlock R() const { return Ab_.range(0, nrFrontals()); } + /** + * normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) + * log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma) + */ + double logNormalizationConstant() const override; - /** Get a view of the parent blocks. */ - constABlock S() const { return Ab_.range(nrFrontals(), size()); } + /** + * Calculate log-probability log(evaluate(x)) for given values `x`: + * -error(x) - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma) + * where x is the vector of values, and Sigma is the covariance matrix. + * This differs from error as it is log, not negative log, and it + * includes the normalization constant. 
+ */ + double logProbability(const VectorValues& x) const; - /** Get a view of the S matrix for the variable pointed to by the given key iterator */ - constABlock S(const_iterator it) const { return BaseFactor::getA(it); } + /** + * Calculate probability density for given values `x`: + * exp(logProbability(x)) == exp(-GaussianFactor::error(x)) / sqrt((2*pi)^n*det(Sigma)) + * where x is the vector of values, and Sigma is the covariance matrix. + */ + double evaluate(const VectorValues& x) const; - /** Get a view of the r.h.s. vector d */ - const constBVector d() const { return BaseFactor::getb(); } + /// Evaluate probability density, sugar. + double operator()(const VectorValues& x) const { + return evaluate(x); + } /** * Solves a conditional Gaussian and writes the solution into the entries of @@ -185,6 +211,68 @@ namespace gtsam { VectorValues sample(const VectorValues& parentsValues) const; /// @} + /// @name Linear algebra. + /// @{ + + /** Return a view of the upper-triangular R block of the conditional */ + constABlock R() const { return Ab_.range(0, nrFrontals()); } + + /** Get a view of the parent blocks. */ + constABlock S() const { return Ab_.range(nrFrontals(), size()); } + + /** Get a view of the S matrix for the variable pointed to by the given key iterator */ + constABlock S(const_iterator it) const { return BaseFactor::getA(it); } + + /** Get a view of the r.h.s. vector d */ + const constBVector d() const { return BaseFactor::getb(); } + + /** + * @brief Compute the determinant of the R matrix. + * + * The determinant is computed in log form using logDeterminant for + * numerical stability and then exponentiated. + * + * Note, the covariance matrix \f$ \Sigma = (R^T R)^{-1} \f$, and hence + * \f$ \det(\Sigma) = 1 / \det(R^T R) = 1 / determinant()^ 2 \f$. + * + * @return double + */ + inline double determinant() const { return exp(logDeterminant()); } + + /** + * @brief Compute the log determinant of the R matrix. 
+ * + * For numerical stability, the determinant is computed in log + * form, so it is a summation rather than a multiplication. + * + * Note, the covariance matrix \f$ \Sigma = (R^T R)^{-1} \f$, and hence + * \f$ \log \det(\Sigma) = - \log \det(R^T R) = - 2 logDeterminant() \f$. + * + * @return double + */ + double logDeterminant() const; + + /// @} + /// @name HybridValues methods. + /// @{ + + /** + * Calculate log-probability log(evaluate(x)) for HybridValues `x`. + * Simply dispatches to VectorValues version. + */ + double logProbability(const HybridValues& x) const override; + + /** + * Calculate probability for HybridValues `x`. + * Simply dispatches to VectorValues version. + */ + double evaluate(const HybridValues& x) const override; + + using Conditional::operator(); // Expose evaluate(const HybridValues&) method.. + using JacobianFactor::error; // Expose error(const HybridValues&) method.. + + /// @} + #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 /// @name Deprecated diff --git a/gtsam/linear/GaussianFactor.cpp b/gtsam/linear/GaussianFactor.cpp index 0802deff4e..e60e626a1d 100644 --- a/gtsam/linear/GaussianFactor.cpp +++ b/gtsam/linear/GaussianFactor.cpp @@ -18,16 +18,24 @@ // \callgraph +#include #include #include namespace gtsam { -/* ************************************************************************* */ - VectorValues GaussianFactor::hessianDiagonal() const { - VectorValues d; - hessianDiagonalAdd(d); - return d; - } +double GaussianFactor::error(const VectorValues& c) const { + throw std::runtime_error("GaussianFactor::error is not implemented"); +} + +double GaussianFactor::error(const HybridValues& c) const { + return this->error(c.continuous()); +} +VectorValues GaussianFactor::hessianDiagonal() const { + VectorValues d; + hessianDiagonalAdd(d); + return d; } + +} // namespace gtsam diff --git a/gtsam/linear/GaussianFactor.h b/gtsam/linear/GaussianFactor.h index 672f5aa0d9..eb4ea7e06d 100644 --- a/gtsam/linear/GaussianFactor.h +++ 
b/gtsam/linear/GaussianFactor.h @@ -63,8 +63,20 @@ namespace gtsam { /** Equals for testable */ virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const = 0; - /** Print for testable */ - virtual double error(const VectorValues& c) const = 0; /** 0.5*(A*x-b)'*D*(A*x-b) */ + /** + * In Gaussian factors, the error function returns either the negative log-likelihood, e.g., + * 0.5*(A*x-b)'*D*(A*x-b) + * for a \class JacobianFactor, or the negative log-density, e.g., + * 0.5*(A*x-b)'*D*(A*x-b) - log(k) + * for a \class GaussianConditional, where k is the normalization constant. + */ + virtual double error(const VectorValues& c) const; + + /** + * The Factor::error simply extracts the \class VectorValues from the + * \class HybridValues and calculates the error. + */ + double error(const HybridValues& c) const override; /** Return the dimension of the variable pointed to by the given key iterator */ virtual DenseIndex getDim(const_iterator variable) const = 0; diff --git a/gtsam/linear/GaussianFactorGraph.cpp b/gtsam/linear/GaussianFactorGraph.cpp index 72eb107d09..0f11639822 100644 --- a/gtsam/linear/GaussianFactorGraph.cpp +++ b/gtsam/linear/GaussianFactorGraph.cpp @@ -67,6 +67,22 @@ namespace gtsam { return spec; } + /* ************************************************************************* */ + double GaussianFactorGraph::error(const VectorValues& x) const { + double total_error = 0.; + for(const sharedFactor& factor: *this){ + if(factor) + total_error += factor->error(x); + } + return total_error; + } + + /* ************************************************************************* */ + double GaussianFactorGraph::probPrime(const VectorValues& c) const { + // NOTE the 0.5 constant is handled by the factor error. 
+ return exp(-error(c)); + } + /* ************************************************************************* */ GaussianFactorGraph::shared_ptr GaussianFactorGraph::cloneToPtr() const { gtsam::GaussianFactorGraph::shared_ptr result(new GaussianFactorGraph()); diff --git a/gtsam/linear/GaussianFactorGraph.h b/gtsam/linear/GaussianFactorGraph.h index 0d5057aa88..fb5e1ea362 100644 --- a/gtsam/linear/GaussianFactorGraph.h +++ b/gtsam/linear/GaussianFactorGraph.h @@ -54,6 +54,12 @@ namespace gtsam { static std::pair, boost::shared_ptr > DefaultEliminate(const FactorGraphType& factors, const Ordering& keys) { return EliminatePreferCholesky(factors, keys); } + /// The default ordering generation function + static Ordering DefaultOrderingFunc( + const FactorGraphType& graph, + boost::optional variableIndex) { + return Ordering::Colamd(*variableIndex); + } }; /* ************************************************************************* */ @@ -74,9 +80,20 @@ namespace gtsam { typedef EliminateableFactorGraph BaseEliminateable; ///< Typedef to base elimination class typedef boost::shared_ptr shared_ptr; ///< shared_ptr to this class + /// @name Constructors + /// @{ + /** Default constructor */ GaussianFactorGraph() {} + /** + * Construct from an initializer lists of GaussianFactor shared pointers. 
+ * Example: + * GaussianFactorGraph graph = { factor1, factor2, factor3 }; + */ + GaussianFactorGraph(std::initializer_list factors) : Base(factors) {} + + /** Construct from iterator over factors */ template GaussianFactorGraph(ITERATOR firstFactor, ITERATOR lastFactor) : Base(firstFactor, lastFactor) {} @@ -92,6 +109,7 @@ namespace gtsam { /** Virtual destructor */ virtual ~GaussianFactorGraph() {} + /// @} /// @name Testable /// @{ @@ -149,20 +167,10 @@ namespace gtsam { std::map getKeyDimMap() const; /** unnormalized error */ - double error(const VectorValues& x) const { - double total_error = 0.; - for(const sharedFactor& factor: *this){ - if(factor) - total_error += factor->error(x); - } - return total_error; - } + double error(const VectorValues& x) const; /** Unnormalized probability. O(n) */ - double probPrime(const VectorValues& c) const { - // NOTE the 0.5 constant is handled by the factor error. - return exp(-error(c)); - } + double probPrime(const VectorValues& c) const; /** * Clone() performs a deep-copy of the graph, including all of the factors. diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp index 8646a9cae1..c5cde2c151 100644 --- a/gtsam/linear/HessianFactor.cpp +++ b/gtsam/linear/HessianFactor.cpp @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -41,7 +40,6 @@ #include using namespace std; -using namespace boost::assign; namespace br { using namespace boost::range; using namespace boost::adaptors; @@ -49,6 +47,9 @@ using namespace boost::adaptors; namespace gtsam { +// Typedefs used in constructors below. 
+using Dims = std::vector; + /* ************************************************************************* */ void HessianFactor::Allocate(const Scatter& scatter) { gttic(HessianFactor_Allocate); @@ -74,14 +75,14 @@ HessianFactor::HessianFactor(const Scatter& scatter) { /* ************************************************************************* */ HessianFactor::HessianFactor() : - info_(cref_list_of<1>(1)) { + info_(Dims{1}) { assert(info_.rows() == 1); constantTerm() = 0.0; } /* ************************************************************************* */ -HessianFactor::HessianFactor(Key j, const Matrix& G, const Vector& g, double f) : - GaussianFactor(cref_list_of<1>(j)), info_(cref_list_of<2>(G.cols())(1)) { +HessianFactor::HessianFactor(Key j, const Matrix& G, const Vector& g, double f) + : GaussianFactor(KeyVector{j}), info_(Dims{G.cols(), 1}) { if (G.rows() != G.cols() || G.rows() != g.size()) throw invalid_argument( "Attempting to construct HessianFactor with inconsistent matrix and/or vector dimensions"); @@ -93,8 +94,8 @@ HessianFactor::HessianFactor(Key j, const Matrix& G, const Vector& g, double f) /* ************************************************************************* */ // error is 0.5*(x-mu)'*inv(Sigma)*(x-mu) = 0.5*(x'*G*x - 2*x'*G*mu + mu'*G*mu) // where G = inv(Sigma), g = G*mu, f = mu'*G*mu = mu'*g -HessianFactor::HessianFactor(Key j, const Vector& mu, const Matrix& Sigma) : - GaussianFactor(cref_list_of<1>(j)), info_(cref_list_of<2>(Sigma.cols())(1)) { +HessianFactor::HessianFactor(Key j, const Vector& mu, const Matrix& Sigma) + : GaussianFactor(KeyVector{j}), info_(Dims{Sigma.cols(), 1}) { if (Sigma.rows() != Sigma.cols() || Sigma.rows() != mu.size()) throw invalid_argument( "Attempting to construct HessianFactor with inconsistent matrix and/or vector dimensions"); @@ -107,8 +108,8 @@ HessianFactor::HessianFactor(Key j, const Vector& mu, const Matrix& Sigma) : HessianFactor::HessianFactor(Key j1, Key j2, const Matrix& G11, const Matrix& 
G12, const Vector& g1, const Matrix& G22, const Vector& g2, double f) : - GaussianFactor(cref_list_of<2>(j1)(j2)), info_( - cref_list_of<3>(G11.cols())(G22.cols())(1)) { + GaussianFactor(KeyVector{j1,j2}), info_( + Dims{G11.cols(),G22.cols(),1}) { info_.setDiagonalBlock(0, G11); info_.setOffDiagonalBlock(0, 1, G12); info_.setDiagonalBlock(1, G22); @@ -121,8 +122,8 @@ HessianFactor::HessianFactor(Key j1, Key j2, Key j3, const Matrix& G11, const Matrix& G12, const Matrix& G13, const Vector& g1, const Matrix& G22, const Matrix& G23, const Vector& g2, const Matrix& G33, const Vector& g3, double f) : - GaussianFactor(cref_list_of<3>(j1)(j2)(j3)), info_( - cref_list_of<4>(G11.cols())(G22.cols())(G33.cols())(1)) { + GaussianFactor(KeyVector{j1,j2,j3}), info_( + Dims{G11.cols(),G22.cols(),G33.cols(),1}) { if (G11.rows() != G11.cols() || G11.rows() != G12.rows() || G11.rows() != G13.rows() || G11.rows() != g1.size() || G22.cols() != G12.cols() || G33.cols() != G13.cols() diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h index 3eefe12288..492df138f4 100644 --- a/gtsam/linear/HessianFactor.h +++ b/gtsam/linear/HessianFactor.h @@ -196,6 +196,9 @@ namespace gtsam { /** Compare to another factor for testing (implementing Testable) */ bool equals(const GaussianFactor& lf, double tol = 1e-9) const override; + /// HybridValues simply extracts the \class VectorValues and calls error. + using GaussianFactor::error; + /** * Evaluate the factor error f(x). * returns 0.5*[x -1]'*H*[x -1] (also see constructor documentation) diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index 6354766874..84083c4bc1 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -31,7 +31,6 @@ #include #include -#include #include #include #include @@ -44,13 +43,16 @@ #include using namespace std; -using namespace boost::assign; namespace gtsam { +// Typedefs used in constructors below. 
+using Dims = std::vector; +using Pairs = std::vector>; + /* ************************************************************************* */ JacobianFactor::JacobianFactor() : - Ab_(cref_list_of<1>(1), 0) { + Ab_(Dims{1}, 0) { getb().setZero(); } @@ -68,29 +70,27 @@ JacobianFactor::JacobianFactor(const GaussianFactor& gf) { /* ************************************************************************* */ JacobianFactor::JacobianFactor(const Vector& b_in) : - Ab_(cref_list_of<1>(1), b_in.size()) { + Ab_(Dims{1}, b_in.size()) { getb() = b_in; } /* ************************************************************************* */ JacobianFactor::JacobianFactor(Key i1, const Matrix& A1, const Vector& b, const SharedDiagonal& model) { - fillTerms(cref_list_of<1>(make_pair(i1, A1)), b, model); + fillTerms(Pairs{{i1, A1}}, b, model); } /* ************************************************************************* */ JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2, const Matrix& A2, const Vector& b, const SharedDiagonal& model) { - fillTerms(cref_list_of<2>(make_pair(i1, A1))(make_pair(i2, A2)), b, model); + fillTerms(Pairs{{i1, A1}, {i2, A2}}, b, model); } /* ************************************************************************* */ JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, const Matrix& A3, const Vector& b, const SharedDiagonal& model) { - fillTerms( - cref_list_of<3>(make_pair(i1, A1))(make_pair(i2, A2))(make_pair(i3, A3)), - b, model); + fillTerms(Pairs{{i1, A1}, {i2, A2}, {i3, A3}}, b, model); } /* ************************************************************************* */ diff --git a/gtsam/linear/JacobianFactor.h b/gtsam/linear/JacobianFactor.h index 8bcf18268a..ae661c642e 100644 --- a/gtsam/linear/JacobianFactor.h +++ b/gtsam/linear/JacobianFactor.h @@ -198,7 +198,12 @@ namespace gtsam { Vector unweighted_error(const VectorValues& c) const; /** (A*x-b) */ Vector error_vector(const VectorValues& c) 
const; /** (A*x-b)/sigma */ - double error(const VectorValues& c) const override; /** 0.5*(A*x-b)'*D*(A*x-b) */ + + /// HybridValues simply extracts the \class VectorValues and calls error. + using GaussianFactor::error; + + //// 0.5*(A*x-b)'*D*(A*x-b). + double error(const VectorValues& c) const override; /** Return the augmented information matrix represented by this GaussianFactor. * The augmented information matrix contains the information matrix with an diff --git a/gtsam/linear/KalmanFilter.cpp b/gtsam/linear/KalmanFilter.cpp index 8a1b145c34..05e82be087 100644 --- a/gtsam/linear/KalmanFilter.cpp +++ b/gtsam/linear/KalmanFilter.cpp @@ -27,9 +27,7 @@ #include #include -#include -using namespace boost::assign; using namespace std; namespace gtsam { diff --git a/gtsam/linear/LossFunctions.cpp b/gtsam/linear/LossFunctions.cpp index 7307c4a687..64ded7cc34 100644 --- a/gtsam/linear/LossFunctions.cpp +++ b/gtsam/linear/LossFunctions.cpp @@ -40,6 +40,10 @@ Vector Base::weight(const Vector& error) const { return w; } +Vector Base::sqrtWeight(const Vector &error) const { + return weight(error).cwiseSqrt(); +} + // The following three functions re-weight block matrices and a vector // according to their weight implementation diff --git a/gtsam/linear/LossFunctions.h b/gtsam/linear/LossFunctions.h index d9cfc1f3c3..ee05cb9871 100644 --- a/gtsam/linear/LossFunctions.h +++ b/gtsam/linear/LossFunctions.h @@ -115,9 +115,7 @@ class GTSAM_EXPORT Base { Vector weight(const Vector &error) const; /** square root version of the weight function */ - Vector sqrtWeight(const Vector &error) const { - return weight(error).cwiseSqrt(); - } + Vector sqrtWeight(const Vector &error) const; /** reweight block matrices and a vector according to their weight * implementation */ diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index 8bcef6fcce..8c6b5fd555 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -47,8 +47,9 @@ void updateAb(MATRIX& Ab, 
int j, const Vector& a, const Vector& rd) { /* ************************************************************************* */ // check *above the diagonal* for non-zero entries -boost::optional checkIfDiagonal(const Matrix M) { +boost::optional checkIfDiagonal(const Matrix& M) { size_t m = M.rows(), n = M.cols(); + assert(m > 0); // check all non-diagonal entries bool full = false; size_t i, j; @@ -92,7 +93,7 @@ Gaussian::shared_ptr Gaussian::SqrtInformation(const Matrix& R, bool smart) { return Diagonal::Sigmas(diagonal->array().inverse(), true); } // NOTE(frank): only reaches here if !(smart && diagonal) - return shared_ptr(new Gaussian(R.rows(), R)); + return boost::make_shared(R.rows(), R); } /* ************************************************************************* */ @@ -108,7 +109,7 @@ Gaussian::shared_ptr Gaussian::Information(const Matrix& information, bool smart else { Eigen::LLT llt(information); Matrix R = llt.matrixU(); - return shared_ptr(new Gaussian(n, R)); + return boost::make_shared(n, R); } } @@ -238,6 +239,8 @@ void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const whitenInPlace(b); } +Matrix Gaussian::information() const { return R().transpose() * R(); } + /* ************************************************************************* */ // Diagonal /* ************************************************************************* */ @@ -283,6 +286,11 @@ Diagonal::shared_ptr Diagonal::Sigmas(const Vector& sigmas, bool smart) { full: return Diagonal::shared_ptr(new Diagonal(sigmas)); } +/* ************************************************************************* */ +Diagonal::shared_ptr Diagonal::Precisions(const Vector& precisions, + bool smart) { + return Variances(precisions.array().inverse(), smart); +} /* ************************************************************************* */ void Diagonal::print(const string& name) const { gtsam::print(sigmas_, name + "diagonal sigmas "); @@ -293,22 +301,18 @@ Vector 
Diagonal::whiten(const Vector& v) const { return v.cwiseProduct(invsigmas_); } -/* ************************************************************************* */ Vector Diagonal::unwhiten(const Vector& v) const { return v.cwiseProduct(sigmas_); } -/* ************************************************************************* */ Matrix Diagonal::Whiten(const Matrix& H) const { return vector_scale(invsigmas(), H); } -/* ************************************************************************* */ void Diagonal::WhitenInPlace(Matrix& H) const { vector_scale_inplace(invsigmas(), H); } -/* ************************************************************************* */ void Diagonal::WhitenInPlace(Eigen::Block H) const { H = invsigmas().asDiagonal() * H; } @@ -376,6 +380,32 @@ Vector Constrained::whiten(const Vector& v) const { return c; } +/* ************************************************************************* */ +Constrained::shared_ptr Constrained::MixedSigmas(const Vector& sigmas) { + return MixedSigmas(Vector::Constant(sigmas.size(), 1000.0), sigmas); +} + +Constrained::shared_ptr Constrained::MixedSigmas(double m, + const Vector& sigmas) { + return MixedSigmas(Vector::Constant(sigmas.size(), m), sigmas); +} + +Constrained::shared_ptr Constrained::MixedVariances(const Vector& mu, + const Vector& variances) { + return shared_ptr(new Constrained(mu, variances.cwiseSqrt())); +} +Constrained::shared_ptr Constrained::MixedVariances(const Vector& variances) { + return shared_ptr(new Constrained(variances.cwiseSqrt())); +} + +Constrained::shared_ptr Constrained::MixedPrecisions(const Vector& mu, + const Vector& precisions) { + return MixedVariances(mu, precisions.array().inverse()); +} +Constrained::shared_ptr Constrained::MixedPrecisions(const Vector& precisions) { + return MixedVariances(precisions.array().inverse()); +} + /* ************************************************************************* */ double Constrained::squaredMahalanobisDistance(const Vector& v) const { 
Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements @@ -622,6 +652,11 @@ void Unit::print(const std::string& name) const { cout << name << "unit (" << dim_ << ") " << endl; } +/* ************************************************************************* */ +double Unit::squaredMahalanobisDistance(const Vector& v) const { + return v.dot(v); +} + /* ************************************************************************* */ // Robust /* ************************************************************************* */ @@ -662,6 +697,13 @@ void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const{ robust_->reweight(A1,A2,A3,b); } +Vector Robust::unweightedWhiten(const Vector& v) const { + return noise_->unweightedWhiten(v); +} +double Robust::weight(const Vector& v) const { + return robust_->weight(v.norm()); +} + Robust::shared_ptr Robust::Create( const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise){ return shared_ptr(new Robust(robust,noise)); diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 5a29e5d7dd..447121848e 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -255,7 +255,7 @@ namespace gtsam { virtual Matrix R() const { return thisR();} /// Compute information matrix - virtual Matrix information() const { return R().transpose() * R(); } + virtual Matrix information() const; /// Compute covariance matrix virtual Matrix covariance() const; @@ -319,9 +319,7 @@ namespace gtsam { * A diagonal noise model created by specifying a Vector of precisions, i.e. * i.e. 
the diagonal of the information matrix, i.e., weights */ - static shared_ptr Precisions(const Vector& precisions, bool smart = true) { - return Variances(precisions.array().inverse(), smart); - } + static shared_ptr Precisions(const Vector& precisions, bool smart = true); void print(const std::string& name) const override; Vector sigmas() const override { return sigmas_; } @@ -426,39 +424,27 @@ namespace gtsam { * A diagonal noise model created by specifying a Vector of * standard devations, some of which might be zero */ - static shared_ptr MixedSigmas(const Vector& sigmas) { - return MixedSigmas(Vector::Constant(sigmas.size(), 1000.0), sigmas); - } + static shared_ptr MixedSigmas(const Vector& sigmas); /** * A diagonal noise model created by specifying a Vector of * standard devations, some of which might be zero */ - static shared_ptr MixedSigmas(double m, const Vector& sigmas) { - return MixedSigmas(Vector::Constant(sigmas.size(), m), sigmas); - } + static shared_ptr MixedSigmas(double m, const Vector& sigmas); /** * A diagonal noise model created by specifying a Vector of * standard devations, some of which might be zero */ - static shared_ptr MixedVariances(const Vector& mu, const Vector& variances) { - return shared_ptr(new Constrained(mu, variances.cwiseSqrt())); - } - static shared_ptr MixedVariances(const Vector& variances) { - return shared_ptr(new Constrained(variances.cwiseSqrt())); - } + static shared_ptr MixedVariances(const Vector& mu, const Vector& variances); + static shared_ptr MixedVariances(const Vector& variances); /** * A diagonal noise model created by specifying a Vector of * precisions, some of which might be inf */ - static shared_ptr MixedPrecisions(const Vector& mu, const Vector& precisions) { - return MixedVariances(mu, precisions.array().inverse()); - } - static shared_ptr MixedPrecisions(const Vector& precisions) { - return MixedVariances(precisions.array().inverse()); - } + static shared_ptr MixedPrecisions(const Vector& mu, const 
Vector& precisions); + static shared_ptr MixedPrecisions(const Vector& precisions); /** * The squaredMahalanobisDistance function for a constrained noisemodel, @@ -616,7 +602,7 @@ namespace gtsam { bool isUnit() const override { return true; } void print(const std::string& name) const override; - double squaredMahalanobisDistance(const Vector& v) const override {return v.dot(v); } + double squaredMahalanobisDistance(const Vector& v) const override; Vector whiten(const Vector& v) const override { return v; } Vector unwhiten(const Vector& v) const override { return v; } Matrix Whiten(const Matrix& H) const override { return H; } @@ -710,12 +696,8 @@ namespace gtsam { void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const override; void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const override; - Vector unweightedWhiten(const Vector& v) const override { - return noise_->unweightedWhiten(v); - } - double weight(const Vector& v) const override { - return robust_->weight(v.norm()); - } + Vector unweightedWhiten(const Vector& v) const override; + double weight(const Vector& v) const override; static shared_ptr Create( const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise); @@ -732,7 +714,7 @@ namespace gtsam { }; // Helper function - GTSAM_EXPORT boost::optional checkIfDiagonal(const Matrix M); + GTSAM_EXPORT boost::optional checkIfDiagonal(const Matrix& M); } // namespace noiseModel diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp index a7af7d8d8c..1414121597 100644 --- a/gtsam/linear/PCGSolver.cpp +++ b/gtsam/linear/PCGSolver.cpp @@ -136,6 +136,17 @@ void GaussianFactorGraphSystem::rightPrecondition(const Vector &x, preconditioner_.transposeSolve(x, y); } +/**********************************************************************************/ +void GaussianFactorGraphSystem::scal(const double alpha, Vector &x) const { + x *= alpha; +} +double GaussianFactorGraphSystem::dot(const Vector &x, const Vector &y) const 
{ + return x.dot(y); +} +void GaussianFactorGraphSystem::axpy(const double alpha, const Vector &x, + Vector &y) const { + y += alpha * x; +} /**********************************************************************************/ VectorValues buildVectorValues(const Vector &v, const Ordering &ordering, const map & dimensions) { diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h index 198b77ec8d..cb90ae5200 100644 --- a/gtsam/linear/PCGSolver.h +++ b/gtsam/linear/PCGSolver.h @@ -102,15 +102,9 @@ class GTSAM_EXPORT GaussianFactorGraphSystem { void multiply(const Vector &x, Vector& y) const; void leftPrecondition(const Vector &x, Vector &y) const; void rightPrecondition(const Vector &x, Vector &y) const; - inline void scal(const double alpha, Vector &x) const { - x *= alpha; - } - inline double dot(const Vector &x, const Vector &y) const { - return x.dot(y); - } - inline void axpy(const double alpha, const Vector &x, Vector &y) const { - y += alpha * x; - } + void scal(const double alpha, Vector &x) const; + double dot(const Vector &x, const Vector &y) const; + void axpy(const double alpha, const Vector &x, Vector &y) const; void getb(Vector &b) const; }; diff --git a/gtsam/linear/SubgraphPreconditioner.cpp b/gtsam/linear/SubgraphPreconditioner.cpp index 6689cdbed4..4e761a3fd3 100644 --- a/gtsam/linear/SubgraphPreconditioner.cpp +++ b/gtsam/linear/SubgraphPreconditioner.cpp @@ -110,7 +110,7 @@ VectorValues SubgraphPreconditioner::x(const VectorValues& y) const { /* ************************************************************************* */ double SubgraphPreconditioner::error(const VectorValues& y) const { - Errors e(y); + Errors e = createErrors(y); VectorValues x = this->x(y); Errors e2 = Ab2_.gaussianErrors(x); return 0.5 * (dot(e, e) + dot(e2,e2)); @@ -129,7 +129,7 @@ VectorValues SubgraphPreconditioner::gradient(const VectorValues &y) const { /* ************************************************************************* */ // Apply operator A, A*y = 
[I;A2*inv(R1)]*y = [y; A2*inv(R1)*y] Errors SubgraphPreconditioner::operator*(const VectorValues &y) const { - Errors e(y); + Errors e = createErrors(y); VectorValues x = Rc1_.backSubstitute(y); /* x=inv(R1)*y */ Errors e2 = Ab2_ * x; /* A2*x */ e.splice(e.end(), e2); diff --git a/gtsam/linear/VectorValues.cpp b/gtsam/linear/VectorValues.cpp index 62996af27d..04792b6ba7 100644 --- a/gtsam/linear/VectorValues.cpp +++ b/gtsam/linear/VectorValues.cpp @@ -98,30 +98,34 @@ namespace gtsam { } /* ************************************************************************ */ - void VectorValues::update(const VectorValues& values) - { + VectorValues& VectorValues::update(const VectorValues& values) { iterator hint = begin(); - for(const KeyValuePair& key_value: values) - { - // Use this trick to find the value using a hint, since we are inserting from another sorted map + for (const KeyValuePair& key_value : values) { + // Use this trick to find the value using a hint, since we are inserting + // from another sorted map size_t oldSize = values_.size(); hint = values_.insert(hint, key_value); - if(values_.size() > oldSize) { + if (values_.size() > oldSize) { values_.unsafe_erase(hint); - throw out_of_range("Requested to update a VectorValues with another VectorValues that contains keys not present in the first."); + throw out_of_range( + "Requested to update a VectorValues with another VectorValues that " + "contains keys not present in the first."); } else { hint->second = key_value.second; } } + return *this; } /* ************************************************************************ */ - void VectorValues::insert(const VectorValues& values) - { + VectorValues& VectorValues::insert(const VectorValues& values) { size_t originalSize = size(); values_.insert(values.begin(), values.end()); - if(size() != originalSize + values.size()) - throw invalid_argument("Requested to insert a VectorValues into another VectorValues that already contains one or more of its keys."); + if 
(size() != originalSize + values.size()) + throw invalid_argument( + "Requested to insert a VectorValues into another VectorValues that " + "already contains one or more of its keys."); + return *this; } /* ************************************************************************ */ diff --git a/gtsam/linear/VectorValues.h b/gtsam/linear/VectorValues.h index 4e1f793a03..35ab628d38 100644 --- a/gtsam/linear/VectorValues.h +++ b/gtsam/linear/VectorValues.h @@ -88,11 +88,13 @@ namespace gtsam { /// @name Standard Constructors /// @{ - /** - * Default constructor creates an empty VectorValues. - */ + /// Default constructor creates an empty VectorValues. VectorValues() {} + /// Construct from initializer list. + VectorValues(std::initializer_list> init) + : values_(init.begin(), init.end()) {} + /** Merge two VectorValues into one, this is more efficient than inserting * elements one by one. */ VectorValues(const VectorValues& first, const VectorValues& second); @@ -167,7 +169,7 @@ namespace gtsam { /** For all key/value pairs in \c values, replace values with corresponding keys in this class * with those in \c values. Throws std::out_of_range if any keys in \c values are not present * in this class. */ - void update(const VectorValues& values); + VectorValues& update(const VectorValues& values); /** Insert a vector \c value with key \c j. Throws an invalid_argument exception if the key \c * j is already used. @@ -198,7 +200,7 @@ namespace gtsam { /** Insert all values from \c values. Throws an invalid_argument exception if any keys to be * inserted are already used. */ - void insert(const VectorValues& values); + VectorValues& insert(const VectorValues& values); /** insert that mimics the STL map insert - if the value already exists, the map is not modified * and an iterator to the existing value is returned, along with 'false'. 
If the value did not @@ -212,6 +214,14 @@ namespace gtsam { #endif } + /// insert_or_assign that mimics the STL map insert_or_assign - if the value already exists, the + /// map is updated, otherwise a new value is inserted at j. + void insert_or_assign(Key j, const Vector& value) { + if (!tryInsert(j, value).second) { + (*this)[j] = value; + } + } + /** Erase the vector with the given key, or throw std::out_of_range if it does not exist */ void erase(Key var) { if (values_.unsafe_erase(var) == 0) diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index fdf156ff99..c0230f1c21 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -236,7 +236,9 @@ class VectorValues { bool equals(const gtsam::VectorValues& expected, double tol) const; void insert(size_t j, Vector value); Vector vector() const; + Vector vector(const gtsam::KeyVector& keys) const; Vector at(size_t j) const; + void insert(const gtsam::VectorValues& values); void update(const gtsam::VectorValues& values); //Advanced Interface @@ -401,6 +403,7 @@ class GaussianFactorGraph { // Optimizing and linear algebra gtsam::VectorValues optimize() const; + gtsam::VectorValues optimizeDensely() const; gtsam::VectorValues optimize(const gtsam::Ordering& ordering) const; gtsam::VectorValues optimizeGradientSearch() const; gtsam::VectorValues gradient(const gtsam::VectorValues& x0) const; @@ -453,6 +456,7 @@ class GaussianFactorGraph { }; #include +#include virtual class GaussianConditional : gtsam::JacobianFactor { // Constructors GaussianConditional(size_t key, Vector d, Matrix R, @@ -470,6 +474,10 @@ virtual class GaussianConditional : gtsam::JacobianFactor { size_t name2, Matrix T); // Named constructors + static gtsam::GaussianConditional FromMeanAndStddev(gtsam::Key key, + const Vector& mu, + double sigma); + static gtsam::GaussianConditional FromMeanAndStddev(gtsam::Key key, const Matrix& A, gtsam::Key parent, @@ -490,6 +498,10 @@ virtual class GaussianConditional : gtsam::JacobianFactor { 
bool equals(const gtsam::GaussianConditional& cg, double tol) const; // Standard Interface + double logNormalizationConstant() const; + double logProbability(const gtsam::VectorValues& x) const; + double evaluate(const gtsam::VectorValues& x) const; + double error(const gtsam::VectorValues& x) const; gtsam::Key firstFrontalKey() const; gtsam::VectorValues solve(const gtsam::VectorValues& parents) const; gtsam::JacobianFactor* likelihood( @@ -508,6 +520,11 @@ virtual class GaussianConditional : gtsam::JacobianFactor { // enabling serialization functionality void serialize() const; + + // Expose HybridValues versions + double logProbability(const gtsam::HybridValues& x) const; + double evaluate(const gtsam::HybridValues& x) const; + double error(const gtsam::HybridValues& x) const; }; #include @@ -543,17 +560,22 @@ virtual class GaussianBayesNet { bool equals(const gtsam::GaussianBayesNet& other, double tol) const; size_t size() const; - // Standard interface void push_back(gtsam::GaussianConditional* conditional); void push_back(const gtsam::GaussianBayesNet& bayesNet); gtsam::GaussianConditional* front() const; gtsam::GaussianConditional* back() const; + // Standard interface + // Standard Interface + double logProbability(const gtsam::VectorValues& x) const; + double evaluate(const gtsam::VectorValues& x) const; + double error(const gtsam::VectorValues& x) const; + gtsam::VectorValues optimize() const; - gtsam::VectorValues optimize(gtsam::VectorValues given) const; + gtsam::VectorValues optimize(const gtsam::VectorValues& given) const; gtsam::VectorValues optimizeGradientSearch() const; - gtsam::VectorValues sample(gtsam::VectorValues given) const; + gtsam::VectorValues sample(const gtsam::VectorValues& given) const; gtsam::VectorValues sample() const; gtsam::VectorValues backSubstitute(const gtsam::VectorValues& gx) const; gtsam::VectorValues backSubstituteTranspose(const gtsam::VectorValues& gx) const; @@ -613,17 +635,6 @@ virtual class GaussianBayesTree { 
gtsam::GaussianBayesNet* jointBayesNet(size_t key1, size_t key2) const; }; -#include -class Errors { - //Constructors - Errors(); - Errors(const gtsam::VectorValues& V); - - //Testable - void print(string s = "Errors"); - bool equals(const gtsam::Errors& expected, double tol) const; -}; - #include class GaussianISAM { //Constructor diff --git a/gtsam/linear/tests/testErrors.cpp b/gtsam/linear/tests/testErrors.cpp index f11fb90b99..2075dc5bca 100644 --- a/gtsam/linear/tests/testErrors.cpp +++ b/gtsam/linear/tests/testErrors.cpp @@ -15,9 +15,6 @@ * @author Frank Dellaert */ -#include // for += -using namespace boost::assign; - #include #include #include @@ -26,16 +23,13 @@ using namespace std; using namespace gtsam; /* ************************************************************************* */ -TEST( Errors, arithmetic ) -{ - Errors e; - e += Vector2(1.0,2.0), Vector3(3.0,4.0,5.0); - DOUBLES_EQUAL(1+4+9+16+25,dot(e,e),1e-9); +TEST(Errors, arithmetic) { + Errors e = {Vector2(1.0, 2.0), Vector3(3.0, 4.0, 5.0)}; + DOUBLES_EQUAL(1 + 4 + 9 + 16 + 25, dot(e, e), 1e-9); axpy(2.0, e, e); - Errors expected; - expected += Vector2(3.0,6.0), Vector3(9.0,12.0,15.0); - CHECK(assert_equal(expected,e)); + const Errors expected = {Vector2(3.0, 6.0), Vector3(9.0, 12.0, 15.0)}; + CHECK(assert_equal(expected, e)); } /* ************************************************************************* */ diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp index 2b125265f7..7f0641dbf8 100644 --- a/gtsam/linear/tests/testGaussianBayesNet.cpp +++ b/gtsam/linear/tests/testGaussianBayesNet.cpp @@ -1,6 +1,6 @@ /* ---------------------------------------------------------------------------- - * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * GTSAM Copyright 2010-2022, Georgia Tech Research Corporation, * Atlanta, Georgia 30332-0415 * All Rights Reserved * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) @@ -25,15 +25,12 @@ #include #include -#include -#include // for operator += #include // STL/C++ #include #include -using namespace boost::assign; using namespace std::placeholders; using namespace std; using namespace gtsam; @@ -41,15 +38,15 @@ using symbol_shorthand::X; static const Key _x_ = 11, _y_ = 22, _z_ = 33; -static GaussianBayesNet smallBayesNet = - list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1))( - GaussianConditional(_y_, Vector1::Constant(5), I_1x1)); +static GaussianBayesNet smallBayesNet = { + boost::make_shared(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1), + boost::make_shared(_y_, Vector1::Constant(5), I_1x1)}; -static GaussianBayesNet noisyBayesNet = - list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1, - noiseModel::Isotropic::Sigma(1, 2.0)))( - GaussianConditional(_y_, Vector1::Constant(5), I_1x1, - noiseModel::Isotropic::Sigma(1, 3.0))); +static GaussianBayesNet noisyBayesNet = { + boost::make_shared(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1, + noiseModel::Isotropic::Sigma(1, 2.0)), + boost::make_shared(_y_, Vector1::Constant(5), I_1x1, + noiseModel::Isotropic::Sigma(1, 3.0))}; /* ************************************************************************* */ TEST( GaussianBayesNet, Matrix ) @@ -67,6 +64,40 @@ TEST( GaussianBayesNet, Matrix ) EXPECT(assert_equal(d,d1)); } +/* ************************************************************************* */ +// Check that the evaluate function matches direct calculation with R. +TEST(GaussianBayesNet, Evaluate1) { + // Let's evaluate at the mean + const VectorValues mean = smallBayesNet.optimize(); + + // We get the matrix, which has noise model applied! + const Matrix R = smallBayesNet.matrix().first; + const Matrix invSigma = R.transpose() * R; + + // The Bayes net is a Gaussian density ~ exp (-0.5*(Rx-d)'*(Rx-d)) + // which at the mean is 1.0! 
So, the only thing we need to calculate is + // the normalization constant 1.0/sqrt((2*pi*Sigma).det()). + // The covariance matrix inv(Sigma) = R'*R, so the determinant is + const double constant = sqrt((invSigma / (2 * M_PI)).determinant()); + EXPECT_DOUBLES_EQUAL(log(constant), + smallBayesNet.at(0)->logNormalizationConstant() + + smallBayesNet.at(1)->logNormalizationConstant(), + 1e-9); + const double actual = smallBayesNet.evaluate(mean); + EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9); +} + +// Check the evaluate with non-unit noise. +TEST(GaussianBayesNet, Evaluate2) { + // See comments in test above. + const VectorValues mean = noisyBayesNet.optimize(); + const Matrix R = noisyBayesNet.matrix().first; + const Matrix invSigma = R.transpose() * R; + const double constant = sqrt((invSigma / (2 * M_PI)).determinant()); + const double actual = noisyBayesNet.evaluate(mean); + EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9); +} + /* ************************************************************************* */ TEST( GaussianBayesNet, NoisyMatrix ) { @@ -85,11 +116,11 @@ TEST( GaussianBayesNet, NoisyMatrix ) /* ************************************************************************* */ TEST(GaussianBayesNet, Optimize) { - VectorValues expected = - map_list_of(_x_, Vector1::Constant(4))(_y_, Vector1::Constant(5)); - VectorValues actual = smallBayesNet.optimize(); + const VectorValues expected{{_x_, Vector1::Constant(4)}, + {_y_, Vector1::Constant(5)}}; + const VectorValues actual = smallBayesNet.optimize(); EXPECT(assert_equal(expected, actual)); -} + } /* ************************************************************************* */ TEST(GaussianBayesNet, NoisyOptimize) { @@ -97,7 +128,7 @@ TEST(GaussianBayesNet, NoisyOptimize) { Vector d; boost::tie(R, d) = noisyBayesNet.matrix(); // find matrix and RHS const Vector x = R.inverse() * d; - VectorValues expected = map_list_of(_x_, x.head(1))(_y_, x.tail(1)); + const VectorValues expected{{_x_, x.head(1)}, {_y_, 
x.tail(1)}}; VectorValues actual = noisyBayesNet.optimize(); EXPECT(assert_equal(expected, actual)); @@ -106,17 +137,16 @@ TEST(GaussianBayesNet, NoisyOptimize) { /* ************************************************************************* */ TEST( GaussianBayesNet, optimizeIncomplete ) { - static GaussianBayesNet incompleteBayesNet = list_of - (GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1)); + static GaussianBayesNet incompleteBayesNet; + incompleteBayesNet.emplace_shared( + _x_, Vector1::Constant(9), I_1x1, _y_, I_1x1); - VectorValues solutionForMissing = map_list_of - (_y_, Vector1::Constant(5)); + VectorValues solutionForMissing { {_y_, Vector1::Constant(5)} }; VectorValues actual = incompleteBayesNet.optimize(solutionForMissing); - VectorValues expected = map_list_of - (_x_, Vector1::Constant(4)) - (_y_, Vector1::Constant(5)); + VectorValues expected{{_x_, Vector1::Constant(4)}, + {_y_, Vector1::Constant(5)}}; EXPECT(assert_equal(expected,actual)); } @@ -129,27 +159,28 @@ TEST( GaussianBayesNet, optimize3 ) // 5 1 5 // NOTE: we are supplying a new RHS here - VectorValues expected = map_list_of - (_x_, Vector1::Constant(-1)) - (_y_, Vector1::Constant(5)); + VectorValues expected { {_x_, Vector1::Constant(-1)}, + {_y_, Vector1::Constant(5)} }; // Test different RHS version - VectorValues gx = map_list_of - (_x_, Vector1::Constant(4)) - (_y_, Vector1::Constant(5)); + VectorValues gx{{_x_, Vector1::Constant(4)}, {_y_, Vector1::Constant(5)}}; VectorValues actual = smallBayesNet.backSubstitute(gx); EXPECT(assert_equal(expected, actual)); } /* ************************************************************************* */ -TEST(GaussianBayesNet, sample) { - GaussianBayesNet gbn; - Matrix A1 = (Matrix(2, 2) << 1., 2., 3., 4.).finished(); - const Vector2 mean(20, 40), b(10, 10); - const double sigma = 0.01; +namespace sampling { +static Matrix A1 = (Matrix(2, 2) << 1., 2., 3., 4.).finished(); +static const Vector2 mean(20, 40), b(10, 10); +static const 
double sigma = 0.01; +static const GaussianBayesNet gbn = { + GaussianConditional::sharedMeanAndStddev(X(0), A1, X(1), b, sigma), + GaussianDensity::sharedMeanAndStddev(X(1), mean, sigma)}; +} // namespace sampling - gbn.add(GaussianConditional::FromMeanAndStddev(X(0), A1, X(1), b, sigma)); - gbn.add(GaussianDensity::FromMeanAndStddev(X(1), mean, sigma)); +/* ************************************************************************* */ +TEST(GaussianBayesNet, sample) { + using namespace sampling; auto actual = gbn.sample(); EXPECT_LONGS_EQUAL(2, actual.size()); @@ -165,6 +196,23 @@ TEST(GaussianBayesNet, sample) { // EXPECT(assert_equal(Vector2(110.032083, 230.039811), actual[X(0)], 1e-5)); } +/* ************************************************************************* */ +// Do Monte Carlo integration of square deviation, should be equal to 9.0. +TEST(GaussianBayesNet, MonteCarloIntegration) { + GaussianBayesNet gbn; + gbn.push_back(noisyBayesNet.at(1)); + + double sum = 0.0; + constexpr size_t N = 1000; + // loop for N samples: + for (size_t i = 0; i < N; i++) { + const auto X_i = gbn.sample(); + sum += pow(X_i[_y_].x() - 5.0, 2.0); + } + // Expected is variance = 3*3 + EXPECT_DOUBLES_EQUAL(9.0, sum / N, 0.5); // Pretty high. 
+} + /* ************************************************************************* */ TEST(GaussianBayesNet, ordering) { @@ -202,13 +250,9 @@ TEST( GaussianBayesNet, backSubstituteTranspose ) // x=R'*y, expected=inv(R')*x // 2 = 1 2 // 5 1 1 3 - VectorValues - x = map_list_of - (_x_, Vector1::Constant(2)) - (_y_, Vector1::Constant(5)), - expected = map_list_of - (_x_, Vector1::Constant(2)) - (_y_, Vector1::Constant(3)); + const VectorValues x{{_x_, Vector1::Constant(2)}, + {_y_, Vector1::Constant(5)}}, + expected{{_x_, Vector1::Constant(2)}, {_y_, Vector1::Constant(3)}}; VectorValues actual = smallBayesNet.backSubstituteTranspose(x); EXPECT(assert_equal(expected, actual)); @@ -225,13 +269,8 @@ TEST( GaussianBayesNet, backSubstituteTransposeNoisy ) // x=R'*y, expected=inv(R')*x // 2 = 1 2 // 5 1 1 3 - VectorValues - x = map_list_of - (_x_, Vector1::Constant(2)) - (_y_, Vector1::Constant(5)), - expected = map_list_of - (_x_, Vector1::Constant(4)) - (_y_, Vector1::Constant(9)); + VectorValues x{{_x_, Vector1::Constant(2)}, {_y_, Vector1::Constant(5)}}, + expected{{_x_, Vector1::Constant(4)}, {_y_, Vector1::Constant(9)}}; VectorValues actual = noisyBayesNet.backSubstituteTranspose(x); EXPECT(assert_equal(expected, actual)); diff --git a/gtsam/linear/tests/testGaussianBayesTree.cpp b/gtsam/linear/tests/testGaussianBayesTree.cpp index 18b4674b5e..ba499ea24d 100644 --- a/gtsam/linear/tests/testGaussianBayesTree.cpp +++ b/gtsam/linear/tests/testGaussianBayesTree.cpp @@ -23,43 +23,45 @@ #include #include -#include -#include // for operator += -#include // for operator += #include +#include -using namespace boost::assign; using namespace std::placeholders; using namespace std; using namespace gtsam; +using Pairs = std::vector>; + namespace { const Key x1=1, x2=2, x3=3, x4=4; const SharedDiagonal chainNoise = noiseModel::Isotropic::Sigma(1, 0.5); - const GaussianFactorGraph chain = list_of - (JacobianFactor(x2, (Matrix(1, 1) << 1.).finished(), x1, (Matrix(1, 1) << 
1.).finished(), (Vector(1) << 1.).finished(), chainNoise)) - (JacobianFactor(x2, (Matrix(1, 1) << 1.).finished(), x3, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise)) - (JacobianFactor(x3, (Matrix(1, 1) << 1.).finished(), x4, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise)) - (JacobianFactor(x4, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise)); - const Ordering chainOrdering = Ordering(list_of(x2)(x1)(x3)(x4)); + const GaussianFactorGraph chain = { + boost::make_shared( + x2, I_1x1, x1, I_1x1, (Vector(1) << 1.).finished(), chainNoise), + boost::make_shared( + x2, I_1x1, x3, I_1x1, (Vector(1) << 1.).finished(), chainNoise), + boost::make_shared( + x3, I_1x1, x4, I_1x1, (Vector(1) << 1.).finished(), chainNoise), + boost::make_shared( + x4, I_1x1, (Vector(1) << 1.).finished(), chainNoise)}; + const Ordering chainOrdering {x2, x1, x3, x4}; /* ************************************************************************* */ // Helper functions for below - GaussianBayesTreeClique::shared_ptr MakeClique(const GaussianConditional& conditional) + GaussianBayesTreeClique::shared_ptr LeafClique(const GaussianConditional& conditional) { return boost::make_shared( boost::make_shared(conditional)); } - template + typedef std::vector Children; + GaussianBayesTreeClique::shared_ptr MakeClique( - const GaussianConditional& conditional, const CHILDREN& children) + const GaussianConditional& conditional, const Children& children) { - GaussianBayesTreeClique::shared_ptr clique = - boost::make_shared( - boost::make_shared(conditional)); + auto clique = LeafClique(conditional); clique->children.assign(children.begin(), children.end()); - for(typename CHILDREN::const_iterator child = children.begin(); child != children.end(); ++child) + for(Children::const_iterator child = children.begin(); child != children.end(); ++child) (*child)->parent_ = clique; return clique; } @@ -90,23 +92,25 @@ TEST( GaussianBayesTree, 
eliminate ) EXPECT_LONGS_EQUAL(3, scatter.at(2).key); EXPECT_LONGS_EQUAL(4, scatter.at(3).key); - Matrix two = (Matrix(1, 1) << 2.).finished(); - Matrix one = (Matrix(1, 1) << 1.).finished(); + const Matrix two = (Matrix(1, 1) << 2.).finished(); + const Matrix one = I_1x1; + + const GaussianConditional gc1( + std::map{ + {x3, (Matrix21() << 2., 0.).finished()}, + {x4, (Matrix21() << 2., 2.).finished()}, + }, + 2, Vector2(2., 2.)); + const GaussianConditional gc2( + Pairs{ + {x2, (Matrix21() << -2. * sqrt(2.), 0.).finished()}, + {x1, (Matrix21() << -sqrt(2.), -sqrt(2.)).finished()}, + {x3, (Matrix21() << -sqrt(2.), sqrt(2.)).finished()}, + }, + 2, (Vector(2) << -2. * sqrt(2.), 0.).finished()); GaussianBayesTree bayesTree_expected; - bayesTree_expected.insertRoot( - MakeClique( - GaussianConditional( - pair_list_of(x3, (Matrix21() << 2., 0.).finished())( - x4, (Matrix21() << 2., 2.).finished()), 2, Vector2(2., 2.)), - list_of( - MakeClique( - GaussianConditional( - pair_list_of(x2, - (Matrix21() << -2. * sqrt(2.), 0.).finished())(x1, - (Matrix21() << -sqrt(2.), -sqrt(2.)).finished())(x3, - (Matrix21() << -sqrt(2.), sqrt(2.)).finished()), 2, - (Vector(2) << -2. 
* sqrt(2.), 0.).finished()))))); + bayesTree_expected.insertRoot(MakeClique(gc1, Children{LeafClique(gc2)})); EXPECT(assert_equal(bayesTree_expected, bt)); } @@ -114,11 +118,10 @@ TEST( GaussianBayesTree, eliminate ) /* ************************************************************************* */ TEST( GaussianBayesTree, optimizeMultiFrontal ) { - VectorValues expected = pair_list_of - (x1, (Vector(1) << 0.).finished()) - (x2, (Vector(1) << 1.).finished()) - (x3, (Vector(1) << 0.).finished()) - (x4, (Vector(1) << 1.).finished()); + VectorValues expected = {{x1, (Vector(1) << 0.).finished()}, + {x2, (Vector(1) << 1.).finished()}, + {x3, (Vector(1) << 0.).finished()}, + {x4, (Vector(1) << 1.).finished()}}; VectorValues actual = chain.eliminateMultifrontal(chainOrdering)->optimize(); EXPECT(assert_equal(expected,actual)); @@ -126,40 +129,61 @@ TEST( GaussianBayesTree, optimizeMultiFrontal ) /* ************************************************************************* */ TEST(GaussianBayesTree, complicatedMarginal) { - // Create the conditionals to go in the BayesTree + const GaussianConditional gc1( + Pairs{{11, (Matrix(3, 1) << 0.0971, 0, 0).finished()}, + {12, (Matrix(3, 2) << 0.3171, 0.4387, 0.9502, 0.3816, 0, 0.7655) + .finished()}}, + 2, Vector3(0.2638, 0.1455, 0.1361)); + const GaussianConditional gc2( + Pairs{ + {9, (Matrix(3, 1) << 0.7952, 0, 0).finished()}, + {10, (Matrix(3, 2) << 0.4456, 0.7547, 0.6463, 0.2760, 0, 0.6797) + .finished()}, + {11, (Matrix(3, 1) << 0.6551, 0.1626, 0.1190).finished()}, + {12, (Matrix(3, 2) << 0.4984, 0.5853, 0.9597, 0.2238, 0.3404, 0.7513) + .finished()}}, + 2, Vector3(0.4314, 0.9106, 0.1818)); + const GaussianConditional gc3( + Pairs{{7, (Matrix(3, 1) << 0.2551, 0, 0).finished()}, + {8, (Matrix(3, 2) << 0.8909, 0.1386, 0.9593, 0.1493, 0, 0.2575) + .finished()}, + {11, (Matrix(3, 1) << 0.8407, 0.2543, 0.8143).finished()}}, + 2, Vector3(0.3998, 0.2599, 0.8001)); + const GaussianConditional gc4( + Pairs{{5, (Matrix(3, 1) << 0.2435, 
0, 0).finished()}, + {6, (Matrix(3, 2) << 0.4733, 0.1966, 0.3517, 0.2511, 0.8308, 0.0) + .finished()}, + // NOTE the non-upper-triangular form + // here since this test was written when we had column permutations + // from LDL. The code still works currently (does not enfore + // upper-triangularity in this case) but this test will need to be + // redone if this stops working in the future + {7, (Matrix(3, 1) << 0.5853, 0.5497, 0.9172).finished()}, + {8, (Matrix(3, 2) << 0.2858, 0.3804, 0.7572, 0.5678, 0.7537, 0.0759) + .finished()}}, + 2, Vector3(0.8173, 0.8687, 0.0844)); + const GaussianConditional gc5( + Pairs{{3, (Matrix(3, 1) << 0.0540, 0, 0).finished()}, + {4, (Matrix(3, 2) << 0.9340, 0.4694, 0.1299, 0.0119, 0, 0.3371) + .finished()}, + {6, (Matrix(3, 2) << 0.1622, 0.5285, 0.7943, 0.1656, 0.3112, 0.6020) + .finished()}}, + 2, Vector3(0.9619, 0.0046, 0.7749)); + const GaussianConditional gc6( + Pairs{{1, (Matrix(3, 1) << 0.2630, 0, 0).finished()}, + {2, (Matrix(3, 2) << 0.7482, 0.2290, 0.4505, 0.9133, 0, 0.1524) + .finished()}, + {5, (Matrix(3, 1) << 0.8258, 0.5383, 0.9961).finished()}}, + 2, Vector3(0.0782, 0.4427, 0.1067)); + + // Create the bayes tree: GaussianBayesTree bt; - bt.insertRoot( - MakeClique(GaussianConditional(pair_list_of (11, (Matrix(3,1) << 0.0971, 0, 0).finished()) - (12, (Matrix(3,2) << 0.3171, 0.4387, 0.9502, 0.3816, 0, 0.7655).finished()), - 2, Vector3(0.2638, 0.1455, 0.1361)), list_of - (MakeClique(GaussianConditional(pair_list_of (9, (Matrix(3,1) << 0.7952, 0, 0).finished()) - (10, (Matrix(3,2) << 0.4456, 0.7547, 0.6463, 0.2760, 0, 0.6797).finished()) - (11, (Matrix(3,1) << 0.6551, 0.1626, 0.1190).finished()) - (12, (Matrix(3,2) << 0.4984, 0.5853, 0.9597, 0.2238, 0.3404, 0.7513).finished()), - 2, Vector3(0.4314, 0.9106, 0.1818)))) - (MakeClique(GaussianConditional(pair_list_of (7, (Matrix(3,1) << 0.2551, 0, 0).finished()) - (8, (Matrix(3,2) << 0.8909, 0.1386, 0.9593, 0.1493, 0, 0.2575).finished()) - (11, (Matrix(3,1) << 0.8407, 0.2543, 
0.8143).finished()), - 2, Vector3(0.3998, 0.2599, 0.8001)), list_of - (MakeClique(GaussianConditional(pair_list_of (5, (Matrix(3,1) << 0.2435, 0, 0).finished()) - (6, (Matrix(3,2) << 0.4733, 0.1966, 0.3517, 0.2511, 0.8308, 0.0).finished()) - // NOTE the non-upper-triangular form - // here since this test was written when we had column permutations - // from LDL. The code still works currently (does not enfore - // upper-triangularity in this case) but this test will need to be - // redone if this stops working in the future - (7, (Matrix(3,1) << 0.5853, 0.5497, 0.9172).finished()) - (8, (Matrix(3,2) << 0.2858, 0.3804, 0.7572, 0.5678, 0.7537, 0.0759).finished()), - 2, Vector3(0.8173, 0.8687, 0.0844)), list_of - (MakeClique(GaussianConditional(pair_list_of (3, (Matrix(3,1) << 0.0540, 0, 0).finished()) - (4, (Matrix(3,2) << 0.9340, 0.4694, 0.1299, 0.0119, 0, 0.3371).finished()) - (6, (Matrix(3,2) << 0.1622, 0.5285, 0.7943, 0.1656, 0.3112, 0.6020).finished()), - 2, Vector3(0.9619, 0.0046, 0.7749)))) - (MakeClique(GaussianConditional(pair_list_of (1, (Matrix(3,1) << 0.2630, 0, 0).finished()) - (2, (Matrix(3,2) << 0.7482, 0.2290, 0.4505, 0.9133, 0, 0.1524).finished()) - (5, (Matrix(3,1) << 0.8258, 0.5383, 0.9961).finished()), - 2, Vector3(0.0782, 0.4427, 0.1067)))))))))); + bt.insertRoot(MakeClique( + gc1, Children{LeafClique(gc2), + MakeClique(gc3, Children{MakeClique( + gc4, Children{LeafClique(gc5), + LeafClique(gc6)})})})); // Marginal on 5 Matrix expectedCov = (Matrix(1,1) << 236.5166).finished(); @@ -201,61 +225,33 @@ double computeError(const GaussianBayesTree& gbt, const Vector10& values) { /* ************************************************************************* */ TEST(GaussianBayesTree, ComputeSteepestDescentPointBT) { + const GaussianConditional gc1( + Pairs{{2, (Matrix(6, 2) << 31.0, 32.0, 0.0, 34.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0) + .finished()}, + {3, (Matrix(6, 2) << 35.0, 36.0, 37.0, 38.0, 41.0, 42.0, 0.0, 44.0, + 0.0, 0.0, 0.0, 0.0) + 
.finished()}, + {4, (Matrix(6, 2) << 0.0, 0.0, 0.0, 0.0, 45.0, 46.0, 47.0, 48.0, + 51.0, 52.0, 0.0, 54.0) + .finished()}}, + 3, (Vector(6) << 29.0, 30.0, 39.0, 40.0, 49.0, 50.0).finished()), + gc2(Pairs{{0, (Matrix(4, 2) << 3.0, 4.0, 0.0, 6.0, 0.0, 0.0, 0.0, 0.0) + .finished()}, + {1, (Matrix(4, 2) << 0.0, 0.0, 0.0, 0.0, 17.0, 18.0, 0.0, 20.0) + .finished()}, + {2, (Matrix(4, 2) << 0.0, 0.0, 0.0, 0.0, 21.0, 22.0, 23.0, 24.0) + .finished()}, + {3, (Matrix(4, 2) << 7.0, 8.0, 9.0, 10.0, 0.0, 0.0, 0.0, 0.0) + .finished()}, + {4, (Matrix(4, 2) << 11.0, 12.0, 13.0, 14.0, 25.0, 26.0, 27.0, + 28.0) + .finished()}}, + 2, (Vector(4) << 1.0, 2.0, 15.0, 16.0).finished()); // Create an arbitrary Bayes Tree GaussianBayesTree bt; - bt.insertRoot(MakeClique(GaussianConditional( - pair_list_of - (2, (Matrix(6, 2) << - 31.0,32.0, - 0.0,34.0, - 0.0,0.0, - 0.0,0.0, - 0.0,0.0, - 0.0,0.0).finished()) - (3, (Matrix(6, 2) << - 35.0,36.0, - 37.0,38.0, - 41.0,42.0, - 0.0,44.0, - 0.0,0.0, - 0.0,0.0).finished()) - (4, (Matrix(6, 2) << - 0.0,0.0, - 0.0,0.0, - 45.0,46.0, - 47.0,48.0, - 51.0,52.0, - 0.0,54.0).finished()), - 3, (Vector(6) << 29.0,30.0,39.0,40.0,49.0,50.0).finished()), list_of - (MakeClique(GaussianConditional( - pair_list_of - (0, (Matrix(4, 2) << - 3.0,4.0, - 0.0,6.0, - 0.0,0.0, - 0.0,0.0).finished()) - (1, (Matrix(4, 2) << - 0.0,0.0, - 0.0,0.0, - 17.0,18.0, - 0.0,20.0).finished()) - (2, (Matrix(4, 2) << - 0.0,0.0, - 0.0,0.0, - 21.0,22.0, - 23.0,24.0).finished()) - (3, (Matrix(4, 2) << - 7.0,8.0, - 9.0,10.0, - 0.0,0.0, - 0.0,0.0).finished()) - (4, (Matrix(4, 2) << - 11.0,12.0, - 13.0,14.0, - 25.0,26.0, - 27.0,28.0).finished()), - 2, (Vector(4) << 1.0,2.0,15.0,16.0).finished()))))); + bt.insertRoot(MakeClique(gc1, Children{LeafClique(gc2)})); // Compute the Hessian numerically Matrix hessian = numericalHessian( @@ -276,12 +272,11 @@ TEST(GaussianBayesTree, ComputeSteepestDescentPointBT) { Vector expected = gradient * step; // Known steepest descent point from Bayes' net version - 
VectorValues expectedFromBN = pair_list_of - (0, Vector2(0.000129034, 0.000688183)) - (1, Vector2(0.0109679, 0.0253767)) - (2, Vector2(0.0680441, 0.114496)) - (3, Vector2(0.16125, 0.241294)) - (4, Vector2(0.300134, 0.423233)); + VectorValues expectedFromBN{{0, Vector2(0.000129034, 0.000688183)}, + {1, Vector2(0.0109679, 0.0253767)}, + {2, Vector2(0.0680441, 0.114496)}, + {3, Vector2(0.16125, 0.241294)}, + {4, Vector2(0.300134, 0.423233)}}; // Compute the steepest descent point with the dogleg function VectorValues actual = bt.optimizeGradientSearch(); diff --git a/gtsam/linear/tests/testGaussianConditional.cpp b/gtsam/linear/tests/testGaussianConditional.cpp index 6ec06a0ceb..0479ce9a11 100644 --- a/gtsam/linear/tests/testGaussianConditional.cpp +++ b/gtsam/linear/tests/testGaussianConditional.cpp @@ -25,12 +25,9 @@ #include #include #include +#include -#include -#include -#include #include -#include #include #include @@ -38,7 +35,6 @@ using namespace gtsam; using namespace std; -using namespace boost::assign; using symbol_shorthand::X; using symbol_shorthand::Y; @@ -48,6 +44,8 @@ static Matrix R = (Matrix(2, 2) << -12.1244, -5.1962, 0., 4.6904).finished(); +using Dims = std::vector; + /* ************************************************************************* */ TEST(GaussianConditional, constructor) { @@ -64,11 +62,7 @@ TEST(GaussianConditional, constructor) Vector d = Vector2(1.0, 2.0); SharedDiagonal s = noiseModel::Diagonal::Sigmas(Vector2(3.0, 4.0)); - vector > terms = pair_list_of - (1, R) - (3, S1) - (5, S2) - (7, S3); + vector > terms = {{1, R}, {3, S1}, {5, S2}, {7, S3}}; GaussianConditional actual(terms, 1, d, s); @@ -130,6 +124,89 @@ TEST( GaussianConditional, equals ) EXPECT( expected.equals(actual) ); } +/* ************************************************************************* */ +namespace density { +static const Key key = 77; +static constexpr double sigma = 3.0; +static const auto unitPrior = + GaussianConditional(key, Vector1::Constant(5), 
I_1x1), + widerPrior = GaussianConditional( + key, Vector1::Constant(5), I_1x1, + noiseModel::Isotropic::Sigma(1, sigma)); +} // namespace density + +/* ************************************************************************* */ +// Check that the evaluate function matches direct calculation with R. +TEST(GaussianConditional, Evaluate1) { + // Let's evaluate at the mean + const VectorValues mean = density::unitPrior.solve(VectorValues()); + + // We get the Hessian matrix, which has noise model applied! + const Matrix invSigma = density::unitPrior.information(); + + // A Gaussian density ~ exp (-0.5*(Rx-d)'*(Rx-d)) + // which at the mean is 1.0! So, the only thing we need to calculate is + // the normalization constant 1.0/sqrt((2*pi*Sigma).det()). + // The covariance matrix inv(Sigma) = R'*R, so the determinant is + const double expected = sqrt((invSigma / (2 * M_PI)).determinant()); + const double actual = density::unitPrior.evaluate(mean); + EXPECT_DOUBLES_EQUAL(expected, actual, 1e-9); + + using density::key; + using density::sigma; + + // Check Invariants at the mean and a different value + for (auto vv : {mean, VectorValues{{key, Vector1(4)}}}) { + EXPECT(GaussianConditional::CheckInvariants(density::unitPrior, vv)); + EXPECT(GaussianConditional::CheckInvariants(density::unitPrior, + HybridValues{vv, {}, {}})); + } + + // Let's numerically integrate and see that we integrate to 1.0. + double integral = 0.0; + // Loop from -5*sigma to 5*sigma in 0.1*sigma steps: + for (double x = -5.0 * sigma; x <= 5.0 * sigma; x += 0.1 * sigma) { + VectorValues xValues; + xValues.insert(key, mean.at(key) + Vector1(x)); + const double density = density::unitPrior.evaluate(xValues); + integral += 0.1 * sigma * density; + } + EXPECT_DOUBLES_EQUAL(1.0, integral, 1e-9); +} + +/* ************************************************************************* */ +// Check the evaluate with non-unit noise. +TEST(GaussianConditional, Evaluate2) { + // See comments in test above. 
+ const VectorValues mean = density::widerPrior.solve(VectorValues()); + const Matrix R = density::widerPrior.R(); + const Matrix invSigma = density::widerPrior.information(); + const double expected = sqrt((invSigma / (2 * M_PI)).determinant()); + const double actual = density::widerPrior.evaluate(mean); + EXPECT_DOUBLES_EQUAL(expected, actual, 1e-9); + + using density::key; + using density::sigma; + + // Check Invariants at the mean and a different value + for (auto vv : {mean, VectorValues{{key, Vector1(4)}}}) { + EXPECT(GaussianConditional::CheckInvariants(density::widerPrior, vv)); + EXPECT(GaussianConditional::CheckInvariants(density::widerPrior, + HybridValues{vv, {}, {}})); + } + + // Let's numerically integrate and see that we integrate to 1.0. + double integral = 0.0; + // Loop from -5*sigma to 5*sigma in 0.1*sigma steps: + for (double x = -5.0 * sigma; x <= 5.0 * sigma; x += 0.1 * sigma) { + VectorValues xValues; + xValues.insert(key, mean.at(key) + Vector1(x)); + const double density = density::widerPrior.evaluate(xValues); + integral += 0.1 * sigma * density; + } + EXPECT_DOUBLES_EQUAL(1.0, integral, 1e-5); +} + /* ************************************************************************* */ TEST( GaussianConditional, solve ) { @@ -154,14 +231,10 @@ TEST( GaussianConditional, solve ) Vector sx1(2); sx1 << 1.0, 1.0; Vector sl1(2); sl1 << 1.0, 1.0; - VectorValues expected = map_list_of - (1, expectedX) - (2, sx1) - (10, sl1); + VectorValues expected = {{1, expectedX}, {2, sx1}, {10, sl1}}; - VectorValues solution = map_list_of - (2, sx1) // parents - (10, sl1); + VectorValues solution = {{2, sx1}, // parents + {10, sl1}}; solution.insert(cg.solve(solution)); EXPECT(assert_equal(expected, solution, tol)); @@ -171,7 +244,7 @@ TEST( GaussianConditional, solve ) TEST( GaussianConditional, solve_simple ) { // 2 variables, frontal has dim=4 - VerticalBlockMatrix blockMatrix(list_of(4)(2)(1), 4); + VerticalBlockMatrix blockMatrix(Dims{4, 2, 1}, 4); 
blockMatrix.matrix() << 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1, 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2, @@ -179,18 +252,16 @@ TEST( GaussianConditional, solve_simple ) 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4; // solve system as a non-multifrontal version first - GaussianConditional cg(list_of(1)(2), 1, blockMatrix); + GaussianConditional cg(KeyVector{1,2}, 1, blockMatrix); // partial solution Vector sx1 = Vector2(9.0, 10.0); // elimination order: 1, 2 - VectorValues actual = map_list_of - (2, sx1); // parent + VectorValues actual = {{2, sx1}}; // parent - VectorValues expected = map_list_of - (2, sx1) - (1, (Vector(4) << -3.1,-3.4,-11.9,-13.2).finished()); + VectorValues expected = { + {2, sx1}, {1, (Vector(4) << -3.1, -3.4, -11.9, -13.2).finished()}}; // verify indices/size EXPECT_LONGS_EQUAL(2, (long)cg.size()); @@ -205,7 +276,7 @@ TEST( GaussianConditional, solve_simple ) TEST( GaussianConditional, solve_multifrontal ) { // create full system, 3 variables, 2 frontals, all 2 dim - VerticalBlockMatrix blockMatrix(list_of(2)(2)(2)(1), 4); + VerticalBlockMatrix blockMatrix(Dims{2, 2, 2, 1}, 4); blockMatrix.matrix() << 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1, 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2, @@ -213,7 +284,7 @@ TEST( GaussianConditional, solve_multifrontal ) 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4; // 3 variables, all dim=2 - GaussianConditional cg(list_of(1)(2)(10), 2, blockMatrix); + GaussianConditional cg(KeyVector{1, 2, 10}, 2, blockMatrix); EXPECT(assert_equal(Vector(blockMatrix.full().rightCols(1)), cg.d())); @@ -221,13 +292,10 @@ TEST( GaussianConditional, solve_multifrontal ) Vector sl1 = Vector2(9.0, 10.0); // elimination order; _x_, _x1_, _l1_ - VectorValues actual = map_list_of - (10, sl1); // parent + VectorValues actual = {{10, sl1}}; // parent - VectorValues expected = map_list_of - (1, Vector2(-3.1,-3.4)) - (2, Vector2(-11.9,-13.2)) - (10, sl1); + VectorValues expected = { + {1, Vector2(-3.1, -3.4)}, {2, Vector2(-11.9, -13.2)}, {10, sl1}}; // verify indices/size 
EXPECT_LONGS_EQUAL(3, (long)cg.size()); @@ -253,21 +321,18 @@ TEST( GaussianConditional, solveTranspose ) { d2(0) = 5; // define nodes and specify in reverse topological sort (i.e. parents last) - GaussianBayesNet cbn = list_of - (GaussianConditional(1, d1, R11, 2, S12)) - (GaussianConditional(1, d2, R22)); + GaussianBayesNet cbn; + cbn.emplace_shared(1, d1, R11, 2, S12); + cbn.emplace_shared(1, d2, R22); // x=R'*y, y=inv(R')*x // 2 = 1 2 // 5 1 1 3 - VectorValues - x = map_list_of - (1, (Vector(1) << 2.).finished()) - (2, (Vector(1) << 5.).finished()), - y = map_list_of - (1, (Vector(1) << 2.).finished()) - (2, (Vector(1) << 3.).finished()); + VectorValues x = {{1, (Vector(1) << 2.).finished()}, + {2, (Vector(1) << 5.).finished()}}, + y = {{1, (Vector(1) << 2.).finished()}, + {2, (Vector(1) << 3.).finished()}}; // test functional version VectorValues actual = cbn.backSubstituteTranspose(x); @@ -326,7 +391,7 @@ TEST(GaussianConditional, FromMeanAndStddev) { const Vector2 b(20, 40), x0(1, 2), x1(3, 4), x2(5, 6); const double sigma = 3; - VectorValues values = map_list_of(X(0), x0)(X(1), x1)(X(2), x2); + VectorValues values{{X(0), x0}, {X(1), x1}, {X(2), x2}}; auto conditional1 = GaussianConditional::FromMeanAndStddev(X(0), A1, X(1), b, sigma); @@ -339,6 +404,13 @@ TEST(GaussianConditional, FromMeanAndStddev) { Vector2 e2 = (x0 - (A1 * x1 + A2 * x2 + b)) / sigma; double expected2 = 0.5 * e2.dot(e2); EXPECT_DOUBLES_EQUAL(expected2, conditional2.error(values), 1e-9); + + // Check Invariants for both conditionals + for (auto conditional : {conditional1, conditional2}) { + EXPECT(GaussianConditional::CheckInvariants(conditional, values)); + EXPECT(GaussianConditional::CheckInvariants(conditional, + HybridValues{values, {}, {}})); + } } /* ************************************************************************* */ @@ -397,6 +469,36 @@ TEST(GaussianConditional, sample) { // EXPECT(assert_equal(Vector2(31.0111856, 64.9850775), actual2[X(0)], 1e-5)); } +/* 
************************************************************************* */ +TEST(GaussianConditional, Error) { + // Create univariate standard gaussian conditional + auto stdGaussian = + GaussianConditional::FromMeanAndStddev(X(0), Vector1::Zero(), 1.0); + VectorValues values; + values.insert(X(0), Vector1::Zero()); + double logProbability = stdGaussian.logProbability(values); + + // Regression. + // These values were computed by hand for a univariate standard gaussian. + EXPECT_DOUBLES_EQUAL(-0.9189385332046727, logProbability, 1e-9); + EXPECT_DOUBLES_EQUAL(0.3989422804014327, exp(logProbability), 1e-9); + EXPECT_DOUBLES_EQUAL(stdGaussian(values), exp(logProbability), 1e-9); +} + +/* ************************************************************************* */ +// Similar test for multivariate gaussian but with sigma 2.0 +TEST(GaussianConditional, LogNormalizationConstant) { + double sigma = 2.0; + auto conditional = GaussianConditional::FromMeanAndStddev(X(0), Vector3::Zero(), sigma); + VectorValues x; + x.insert(X(0), Vector3::Zero()); + Matrix3 Sigma = I_3x3 * sigma * sigma; + double expectedLogNormalizingConstant = log(1 / sqrt((2 * M_PI * Sigma).determinant())); + + EXPECT_DOUBLES_EQUAL(expectedLogNormalizingConstant, + conditional.logNormalizationConstant(), 1e-9); +} + /* ************************************************************************* */ TEST(GaussianConditional, Print) { Matrix A1 = (Matrix(2, 2) << 1., 2., 3., 4.).finished(); @@ -413,6 +515,8 @@ TEST(GaussianConditional, Print) { " R = [ 1 0 ]\n" " [ 0 1 ]\n" " d = [ 20 40 ]\n" + " mean: 1 elements\n" + " x0: 20 40\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected, conditional, "GaussianConditional")); diff --git a/gtsam/linear/tests/testGaussianDensity.cpp b/gtsam/linear/tests/testGaussianDensity.cpp index 14608e7949..97eb0de704 100644 --- a/gtsam/linear/tests/testGaussianDensity.cpp +++ b/gtsam/linear/tests/testGaussianDensity.cpp @@ -52,8 +52,11 @@ TEST(GaussianDensity, 
FromMeanAndStddev) { auto density = GaussianDensity::FromMeanAndStddev(X(0), b, sigma); Vector2 e = (x0 - b) / sigma; - double expected = 0.5 * e.dot(e); - EXPECT_DOUBLES_EQUAL(expected, density.error(values), 1e-9); + double expected1 = 0.5 * e.dot(e); + EXPECT_DOUBLES_EQUAL(expected1, density.error(values), 1e-9); + + double expected2 = density.logNormalizationConstant()- 0.5 * e.dot(e); + EXPECT_DOUBLES_EQUAL(expected2, density.logProbability(values), 1e-9); } /* ************************************************************************* */ diff --git a/gtsam/linear/tests/testGaussianFactorGraph.cpp b/gtsam/linear/tests/testGaussianFactorGraph.cpp index 41464a1109..0d7003ccbc 100644 --- a/gtsam/linear/tests/testGaussianFactorGraph.cpp +++ b/gtsam/linear/tests/testGaussianFactorGraph.cpp @@ -26,10 +26,6 @@ #include #include -#include -#include // for operator += -using namespace boost::assign; - #include #include @@ -212,8 +208,9 @@ TEST(GaussianFactorGraph, gradient) { // 2*f(x) = 100*(x1+c[X(1)])^2 + 100*(x2-x1-[0.2;-0.1])^2 + 25*(l1-x1-[0.0;0.2])^2 + // 25*(l1-x2-[-0.2;0.3])^2 // worked out: df/dx1 = 100*[0.1;0.1] + 100*[0.2;-0.1]) + 25*[0.0;0.2] = [10+20;10-10+5] = [30;5] - VectorValues expected = map_list_of(1, Vector2(5.0, -12.5))(2, Vector2(30.0, 5.0))( - 0, Vector2(-25.0, 17.5)); + VectorValues expected{{1, Vector2(5.0, -12.5)}, + {2, Vector2(30.0, 5.0)}, + {0, Vector2(-25.0, 17.5)}}; // Check the gradient at delta=0 VectorValues zero = VectorValues::Zero(expected); @@ -231,8 +228,8 @@ TEST(GaussianFactorGraph, gradient) { TEST(GaussianFactorGraph, transposeMultiplication) { GaussianFactorGraph A = createSimpleGaussianFactorGraph(); - Errors e; - e += Vector2(0.0, 0.0), Vector2(15.0, 0.0), Vector2(0.0, -5.0), Vector2(-7.5, -5.0); + Errors e = {Vector2(0.0, 0.0), Vector2(15.0, 0.0), Vector2(0.0, -5.0), + Vector2(-7.5, -5.0)}; VectorValues expected; expected.insert(1, Vector2(-37.5, -50.0)); @@ -288,7 +285,7 @@ TEST(GaussianFactorGraph, matrices2) { 
TEST(GaussianFactorGraph, multiplyHessianAdd) { GaussianFactorGraph gfg = createSimpleGaussianFactorGraph(); - VectorValues x = map_list_of(0, Vector2(1, 2))(1, Vector2(3, 4))(2, Vector2(5, 6)); + const VectorValues x{{0, Vector2(1, 2)}, {1, Vector2(3, 4)}, {2, Vector2(5, 6)}}; VectorValues expected; expected.insert(0, Vector2(-450, -450)); @@ -307,8 +304,8 @@ TEST(GaussianFactorGraph, multiplyHessianAdd) { /* ************************************************************************* */ static GaussianFactorGraph createGaussianFactorGraphWithHessianFactor() { GaussianFactorGraph gfg = createSimpleGaussianFactorGraph(); - gfg += HessianFactor(1, 2, 100*I_2x2, Z_2x2, Vector2(0.0, 1.0), - 400*I_2x2, Vector2(1.0, 1.0), 3.0); + gfg.emplace_shared(1, 2, 100 * I_2x2, Z_2x2, Vector2(0.0, 1.0), + 400 * I_2x2, Vector2(1.0, 1.0), 3.0); return gfg; } @@ -326,8 +323,7 @@ TEST(GaussianFactorGraph, multiplyHessianAdd2) { Y << -450, -450, 300, 400, 2950, 3450; EXPECT(assert_equal(Y, AtA * X)); - VectorValues x = map_list_of(0, Vector2(1, 2))(1, Vector2(3, 4))(2, Vector2(5, 6)); - + const VectorValues x {{0, Vector2(1, 2)}, {1, Vector2(3, 4)}, {2, Vector2(5, 6)}}; VectorValues expected; expected.insert(0, Vector2(-450, -450)); expected.insert(1, Vector2(300, 400)); @@ -371,10 +367,10 @@ TEST(GaussianFactorGraph, gradientAtZero) { /* ************************************************************************* */ TEST(GaussianFactorGraph, clone) { // 2 variables, frontal has dim=4 - VerticalBlockMatrix blockMatrix(list_of(4)(2)(1), 4); + VerticalBlockMatrix blockMatrix(KeyVector{4, 2, 1}, 4); blockMatrix.matrix() << 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1, 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.3, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4; - GaussianConditional cg(list_of(1)(2), 1, blockMatrix); + GaussianConditional cg(KeyVector{1, 2}, 1, blockMatrix); GaussianFactorGraph init_graph = createGaussianFactorGraphWithHessianFactor(); 
init_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor diff --git a/gtsam/linear/tests/testHessianFactor.cpp b/gtsam/linear/tests/testHessianFactor.cpp index 3e13ecf10a..92ffe71acc 100644 --- a/gtsam/linear/tests/testHessianFactor.cpp +++ b/gtsam/linear/tests/testHessianFactor.cpp @@ -25,11 +25,6 @@ #include -#include -#include -#include -using namespace boost::assign; - #include #include @@ -38,6 +33,8 @@ using namespace gtsam; const double tol = 1e-5; +using Dims = std::vector; // For constructing block matrices + /* ************************************************************************* */ TEST(HessianFactor, Slot) { @@ -61,8 +58,8 @@ TEST(HessianFactor, emptyConstructor) /* ************************************************************************* */ TEST(HessianFactor, ConversionConstructor) { - HessianFactor expected(list_of(0)(1), - SymmetricBlockMatrix(list_of(2)(4)(1), (Matrix(7,7) << + HessianFactor expected(KeyVector{0, 1}, + SymmetricBlockMatrix(Dims{2, 4, 1}, (Matrix(7,7) << 125.0000, 0.0, -25.0000, 0.0, -100.0000, 0.0, 25.0000, 0.0, 125.0000, 0.0, -25.0000, 0.0, -100.0000, -17.5000, -25.0000, 0.0, 25.0000, 0.0, 0.0, 0.0, -5.0000, @@ -85,9 +82,7 @@ TEST(HessianFactor, ConversionConstructor) HessianFactor actual(jacobian); - VectorValues values = pair_list_of - (0, Vector2(1.0, 2.0)) - (1, (Vector(4) << 3.0, 4.0, 5.0, 6.0).finished()); + VectorValues values{{0, Vector2(1.0, 2.0)}, {1, Vector4(3.0, 4.0, 5.0, 6.0)}}; EXPECT_LONGS_EQUAL(2, (long)actual.size()); EXPECT(assert_equal(expected, actual, 1e-9)); @@ -108,7 +103,7 @@ TEST(HessianFactor, Constructor1) EXPECT(assert_equal(g, Vector(factor.linearTerm()))); EXPECT_LONGS_EQUAL(1, (long)factor.size()); - VectorValues dx = pair_list_of(0, Vector2(1.5, 2.5)); + VectorValues dx{{0, Vector2(1.5, 2.5)}}; // error 0.5*(f - 2*x'*g + x'*G*x) double expected = 80.375; @@ -150,9 +145,7 @@ TEST(HessianFactor, Constructor2) Vector dx0 = (Vector(1) << 0.5).finished(); Vector dx1 = Vector2(1.5, 
2.5); - VectorValues dx = pair_list_of - (0, dx0) - (1, dx1); + VectorValues dx{{0, dx0}, {1, dx1}}; HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f); @@ -171,10 +164,7 @@ TEST(HessianFactor, Constructor2) EXPECT(assert_equal(G22, factor.info().diagonalBlock(1))); // Check case when vector values is larger than factor - VectorValues dxLarge = pair_list_of - (0, dx0) - (1, dx1) - (2, Vector2(0.1, 0.2)); + VectorValues dxLarge {{0, dx0}, {1, dx1}, {2, Vector2(0.1, 0.2)}}; EXPECT_DOUBLES_EQUAL(expected, factor.error(dxLarge), 1e-10); } @@ -200,11 +190,7 @@ TEST(HessianFactor, Constructor3) Vector dx1 = Vector2(1.5, 2.5); Vector dx2 = Vector3(1.5, 2.5, 3.5); - VectorValues dx = pair_list_of - (0, dx0) - (1, dx1) - (2, dx2); - + VectorValues dx {{0, dx0}, {1, dx1}, {2, dx2}}; HessianFactor factor(0, 1, 2, G11, G12, G13, g1, G22, G23, g2, G33, g3, f); double expected = 371.3750; @@ -247,10 +233,7 @@ TEST(HessianFactor, ConstructorNWay) Vector dx1 = Vector2(1.5, 2.5); Vector dx2 = Vector3(1.5, 2.5, 3.5); - VectorValues dx = pair_list_of - (0, dx0) - (1, dx1) - (2, dx2); + VectorValues dx {{0, dx0}, {1, dx1}, {2, dx2}}; KeyVector js {0, 1, 2}; std::vector Gs; @@ -309,7 +292,7 @@ TEST(HessianFactor, CombineAndEliminate1) { hessian.augmentedInformation(), 1e-9)); // perform elimination on jacobian - Ordering ordering = list_of(1); + Ordering ordering {1}; GaussianConditional::shared_ptr expectedConditional; JacobianFactor::shared_ptr expectedFactor; boost::tie(expectedConditional, expectedFactor) = jacobian.eliminate(ordering); @@ -372,7 +355,7 @@ TEST(HessianFactor, CombineAndEliminate2) { hessian.augmentedInformation(), 1e-9)); // perform elimination on jacobian - Ordering ordering = list_of(0); + Ordering ordering {0}; GaussianConditional::shared_ptr expectedConditional; JacobianFactor::shared_ptr expectedFactor; boost::tie(expectedConditional, expectedFactor) = // @@ -437,10 +420,10 @@ TEST(HessianFactor, eliminate2 ) // eliminate the combined factor 
HessianFactor::shared_ptr combinedLF_Chol(new HessianFactor(combined)); - GaussianFactorGraph combinedLFG_Chol = list_of(combinedLF_Chol); + GaussianFactorGraph combinedLFG_Chol {combinedLF_Chol}; std::pair actual_Chol = - EliminateCholesky(combinedLFG_Chol, Ordering(list_of(0))); + EliminateCholesky(combinedLFG_Chol, Ordering{0}); // create expected Conditional Gaussian double oldSigma = 0.0894427; // from when R was made unit @@ -483,8 +466,8 @@ TEST(HessianFactor, combine) { 0.0, -8.94427191).finished(); Vector b = Vector2(2.23606798,-1.56524758); SharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector::Ones(2)); - GaussianFactor::shared_ptr f(new JacobianFactor(0, A0, 1, A1, 2, A2, b, model)); - GaussianFactorGraph factors = list_of(f); + GaussianFactorGraph factors{ + boost::make_shared(0, A0, 1, A1, 2, A2, b, model)}; // Form Ab' * Ab HessianFactor actual(factors); @@ -514,7 +497,7 @@ TEST(HessianFactor, gradientAtZero) HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f); // test gradient at zero - VectorValues expectedG = pair_list_of(0, -g1) (1, -g2); + VectorValues expectedG{{0, -g1}, {1, -g2}}; Matrix A; Vector b; boost::tie(A,b) = factor.jacobian(); KeyVector keys {0, 1}; EXPECT(assert_equal(-A.transpose()*b, expectedG.vector(keys))); @@ -537,7 +520,7 @@ TEST(HessianFactor, gradient) // test gradient Vector x0 = (Vector(1) << 3.0).finished(); Vector x1 = (Vector(2) << -3.5, 7.1).finished(); - VectorValues x = pair_list_of(0, x0) (1, x1); + VectorValues x {{0, x0}, {1, x1}}; Vector expectedGrad0 = (Vector(1) << 10.0).finished(); Vector expectedGrad1 = (Vector(2) << 4.5, 16.1).finished(); diff --git a/gtsam/linear/tests/testJacobianFactor.cpp b/gtsam/linear/tests/testJacobianFactor.cpp index 21146066d7..e0f4d8a0b9 100644 --- a/gtsam/linear/tests/testJacobianFactor.cpp +++ b/gtsam/linear/tests/testJacobianFactor.cpp @@ -25,26 +25,24 @@ #include #include -#include -#include #include #include using namespace std; using namespace gtsam; -using namespace 
boost::assign; + +using Dims = std::vector; // For constructing block matrices namespace { namespace simple { // Terms we'll use - const vector > terms = list_of > - (make_pair(5, Matrix3::Identity())) - (make_pair(10, 2*Matrix3::Identity())) - (make_pair(15, 3*Matrix3::Identity())); - - // RHS and sigmas - const Vector b = Vector3(1., 2., 3.); - const SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector3(0.5, 0.5, 0.5)); + const vector > terms{ + {5, I_3x3}, {10, 2 * I_3x3}, {15, 3 * I_3x3}}; + + // RHS and sigmas + const Vector b = Vector3(1., 2., 3.); + const SharedDiagonal noise = + noiseModel::Diagonal::Sigmas(Vector3(0.5, 0.5, 0.5)); } } @@ -128,7 +126,7 @@ TEST(JacobianFactor, constructors_and_accessors) // VerticalBlockMatrix constructor JacobianFactor expected( boost::make_iterator_range(terms.begin(), terms.begin() + 3), b, noise); - VerticalBlockMatrix blockMatrix(list_of(3)(3)(3)(1), 3); + VerticalBlockMatrix blockMatrix(Dims{3, 3, 3, 1}, 3); blockMatrix(0) = terms[0].second; blockMatrix(1) = terms[1].second; blockMatrix(2) = terms[2].second; @@ -191,22 +189,25 @@ Key keyX(10), keyY(8), keyZ(12); double sigma1 = 0.1; Matrix A11 = I_2x2; Vector2 b1(2, -1); -JacobianFactor factor1(keyX, A11, b1, noiseModel::Isotropic::Sigma(2, sigma1)); +auto factor1 = boost::make_shared( + keyX, A11, b1, noiseModel::Isotropic::Sigma(2, sigma1)); double sigma2 = 0.5; Matrix A21 = -2 * I_2x2; Matrix A22 = 3 * I_2x2; Vector2 b2(4, -5); -JacobianFactor factor2(keyX, A21, keyY, A22, b2, noiseModel::Isotropic::Sigma(2, sigma2)); +auto factor2 = boost::make_shared( + keyX, A21, keyY, A22, b2, noiseModel::Isotropic::Sigma(2, sigma2)); double sigma3 = 1.0; Matrix A32 = -4 * I_2x2; Matrix A33 = 5 * I_2x2; Vector2 b3(3, -6); -JacobianFactor factor3(keyY, A32, keyZ, A33, b3, noiseModel::Isotropic::Sigma(2, sigma3)); +auto factor3 = boost::make_shared( + keyY, A32, keyZ, A33, b3, noiseModel::Isotropic::Sigma(2, sigma3)); -GaussianFactorGraph 
factors(list_of(factor1)(factor2)(factor3)); -Ordering ordering(list_of(keyX)(keyY)(keyZ)); +const GaussianFactorGraph factors { factor1, factor2, factor3 }; +const Ordering ordering { keyX, keyY, keyZ }; } /* ************************************************************************* */ @@ -436,11 +437,11 @@ TEST(JacobianFactor, eliminate) Vector9 sigmas; sigmas << s1, s0, s2; JacobianFactor combinedFactor(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true)); - GaussianFactorGraph::EliminationResult expected = combinedFactor.eliminate(list_of(0)); + GaussianFactorGraph::EliminationResult expected = combinedFactor.eliminate(Ordering{0}); JacobianFactor::shared_ptr expectedJacobian = boost::dynamic_pointer_cast< JacobianFactor>(expected.second); - GaussianFactorGraph::EliminationResult actual = EliminateQR(gfg, list_of(0)); + GaussianFactorGraph::EliminationResult actual = EliminateQR(gfg, Ordering{0}); JacobianFactor::shared_ptr actualJacobian = boost::dynamic_pointer_cast< JacobianFactor>(actual.second); @@ -487,7 +488,7 @@ TEST(JacobianFactor, eliminate2 ) // eliminate the combined factor pair - actual = combined.eliminate(Ordering(list_of(2))); + actual = combined.eliminate(Ordering{2}); // create expected Conditional Gaussian double oldSigma = 0.0894427; // from when R was made unit @@ -539,11 +540,11 @@ TEST(JacobianFactor, EliminateQR) // Create factor graph const SharedDiagonal sig_4D = noiseModel::Isotropic::Sigma(4, 0.5); const SharedDiagonal sig_2D = noiseModel::Isotropic::Sigma(2, 0.5); - GaussianFactorGraph factors = list_of - (JacobianFactor(list_of(3)(5)(7)(9)(11), VerticalBlockMatrix(list_of(2)(2)(2)(2)(2)(1), Ab.block(0, 0, 4, 11)), sig_4D)) - (JacobianFactor(list_of(5)(7)(9)(11), VerticalBlockMatrix(list_of(2)(2)(2)(2)(1), Ab.block(4, 2, 4, 9)), sig_4D)) - (JacobianFactor(list_of(7)(9)(11), VerticalBlockMatrix(list_of(2)(2)(2)(1), Ab.block(8, 4, 4, 7)), sig_4D)) - (JacobianFactor(list_of(11), VerticalBlockMatrix(list_of(2)(1), Ab.block(12, 8, 
2, 3)), sig_2D)); + GaussianFactorGraph factors = { + boost::make_shared(KeyVector{3, 5, 7, 9, 11}, VerticalBlockMatrix(Dims{2, 2, 2, 2, 2, 1}, Ab.block(0, 0, 4, 11)), sig_4D), + boost::make_shared(KeyVector{5, 7, 9, 11}, VerticalBlockMatrix(Dims{2, 2, 2, 2, 1}, Ab.block(4, 2, 4, 9)), sig_4D), + boost::make_shared(KeyVector{7, 9, 11}, VerticalBlockMatrix(Dims{2, 2, 2, 1}, Ab.block(8, 4, 4, 7)), sig_4D), + boost::make_shared(KeyVector{11}, VerticalBlockMatrix(Dims{2, 1}, Ab.block(12, 8, 2, 3)), sig_2D)}; // extract the dense matrix for the graph Matrix actualDense = factors.augmentedJacobian(); @@ -562,12 +563,12 @@ TEST(JacobianFactor, EliminateQR) 0., 0., 0., 0., 0., 0., 0., 0., 8.2503, 3.3757, 6.8476, 0., 0., 0., 0., 0., 0., 0., 0., 0., -5.7095, -0.0090).finished(); - GaussianConditional expectedFragment(list_of(3)(5)(7)(9)(11), 3, - VerticalBlockMatrix(list_of(2)(2)(2)(2)(2)(1), R.topRows(6)), + GaussianConditional expectedFragment(KeyVector{3,5,7,9,11}, 3, + VerticalBlockMatrix(Dims{2, 2, 2, 2, 2, 1}, R.topRows(6)), noiseModel::Unit::Create(6)); // Eliminate (3 frontal variables, 6 scalar columns) using QR !!!! 
- GaussianFactorGraph::EliminationResult actual = EliminateQR(factors, list_of(3)(5)(7)); + GaussianFactorGraph::EliminationResult actual = EliminateQR(factors, Ordering{3, 5, 7}); const JacobianFactor &actualJF = dynamic_cast(*actual.second); EXPECT(assert_equal(expectedFragment, *actual.first, 0.001)); @@ -589,7 +590,7 @@ TEST ( JacobianFactor, constraint_eliminate1 ) // eliminate it pair - actual = lc.eliminate(list_of(1)); + actual = lc.eliminate(Ordering{1}); // verify linear factor is a null ptr EXPECT(actual.second->empty()); @@ -617,7 +618,7 @@ TEST ( JacobianFactor, constraint_eliminate2 ) // eliminate x and verify results pair - actual = lc.eliminate(list_of(1)); + actual = lc.eliminate(Ordering{1}); // LF should be empty // It's tricky to create Eigen matrices that are only zero along one dimension @@ -648,7 +649,7 @@ TEST(JacobianFactor, OverdeterminedEliminate) { SharedDiagonal diagonal = noiseModel::Diagonal::Sigmas(sigmas); JacobianFactor factor(0, Ab.leftCols(3), Ab.col(3), diagonal); - GaussianFactorGraph::EliminationResult actual = factor.eliminate(list_of(0)); + GaussianFactorGraph::EliminationResult actual = factor.eliminate(Ordering{0}); Matrix expectedRd(3, 4); expectedRd << -2.64575131, -2.64575131, -2.64575131, -2.64575131, // diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp index f83ba7831b..c382e0f3ce 100644 --- a/gtsam/linear/tests/testNoiseModel.cpp +++ b/gtsam/linear/tests/testNoiseModel.cpp @@ -22,20 +22,17 @@ #include -#include - #include #include using namespace std; using namespace gtsam; using namespace noiseModel; -using namespace boost::assign; static const double kSigma = 2, kInverseSigma = 1.0 / kSigma, kVariance = kSigma * kSigma, prc = 1.0 / kVariance; -static const Matrix R = Matrix3::Identity() * kInverseSigma; -static const Matrix kCovariance = Matrix3::Identity() * kVariance; +static const Matrix R = I_3x3 * kInverseSigma; +static const Matrix kCovariance = I_3x3 * kVariance; 
static const Vector3 kSigmas(kSigma, kSigma, kSigma); /* ************************************************************************* */ @@ -723,7 +720,7 @@ TEST(NoiseModel, NonDiagonalGaussian) const Matrix3 info = R.transpose() * R; const Matrix3 cov = info.inverse(); const Vector3 e(1, 1, 1), white = R * e; - Matrix I = Matrix3::Identity(); + Matrix I = I_3x3; { diff --git a/gtsam/linear/tests/testRegularHessianFactor.cpp b/gtsam/linear/tests/testRegularHessianFactor.cpp index 3441c6cc2a..2e4d2249e8 100644 --- a/gtsam/linear/tests/testRegularHessianFactor.cpp +++ b/gtsam/linear/tests/testRegularHessianFactor.cpp @@ -21,13 +21,8 @@ #include -#include -#include -#include - using namespace std; using namespace gtsam; -using namespace boost::assign; /* ************************************************************************* */ TEST(RegularHessianFactor, Constructors) @@ -36,8 +31,7 @@ TEST(RegularHessianFactor, Constructors) // 0.5*|x0 + x1 + x3 - [1;2]|^2 = 0.5*|A*x-b|^2, with A=[I I I] Matrix A1 = I_2x2, A2 = I_2x2, A3 = I_2x2; Vector2 b(1,2); - vector > terms; - terms += make_pair(0, A1), make_pair(1, A2), make_pair(3, A3); + const vector > terms {{0, A1}, {1, A2}, {3, A3}}; RegularJacobianFactor<2> jf(terms, b); // Test conversion from JacobianFactor @@ -65,8 +59,8 @@ TEST(RegularHessianFactor, Constructors) // Test n-way constructor KeyVector keys {0, 1, 3}; - vector Gs; Gs += G11, G12, G13, G22, G23, G33; - vector gs; gs += g1, g2, g3; + vector Gs {G11, G12, G13, G22, G23, G33}; + vector gs {g1, g2, g3}; RegularHessianFactor<2> factor3(keys, Gs, gs, f); EXPECT(assert_equal(factor, factor3)); @@ -82,7 +76,7 @@ TEST(RegularHessianFactor, Constructors) // Test constructor from Information matrix Matrix info = factor.augmentedInformation(); - vector dims; dims += 2, 2, 2; + vector dims {2, 2, 2}; SymmetricBlockMatrix sym(dims, info, true); RegularHessianFactor<2> factor6(keys, sym); EXPECT(assert_equal(factor, factor6)); @@ -97,10 +91,7 @@ TEST(RegularHessianFactor, 
Constructors) Vector Y(6); Y << 9, 12, 9, 12, 9, 12; EXPECT(assert_equal(Y,AtA*X)); - VectorValues x = map_list_of - (0, Vector2(1,2)) - (1, Vector2(3,4)) - (3, Vector2(5,6)); + VectorValues x{{0, Vector2(1, 2)}, {1, Vector2(3, 4)}, {3, Vector2(5, 6)}}; VectorValues expected; expected.insert(0, Y.segment<2>(0)); diff --git a/gtsam/linear/tests/testRegularJacobianFactor.cpp b/gtsam/linear/tests/testRegularJacobianFactor.cpp index b8c4aa689a..205d9d092d 100644 --- a/gtsam/linear/tests/testRegularJacobianFactor.cpp +++ b/gtsam/linear/tests/testRegularJacobianFactor.cpp @@ -24,14 +24,11 @@ #include -#include -#include #include #include using namespace std; using namespace gtsam; -using namespace boost::assign; static const size_t fixedDim = 3; static const size_t nrKeys = 3; @@ -40,10 +37,8 @@ static const size_t nrKeys = 3; namespace { namespace simple { // Terms we'll use - const vector > terms = list_of > - (make_pair(0, Matrix3::Identity())) - (make_pair(1, 2*Matrix3::Identity())) - (make_pair(2, 3*Matrix3::Identity())); + const vector > terms{ + {0, 1 * I_3x3}, {1, 2 * I_3x3}, {2, 3 * I_3x3}}; // RHS and sigmas const Vector b = (Vector(3) << 1., 2., 3.).finished(); @@ -52,10 +47,8 @@ namespace { namespace simple2 { // Terms - const vector > terms2 = list_of > - (make_pair(0, 2*Matrix3::Identity())) - (make_pair(1, 4*Matrix3::Identity())) - (make_pair(2, 6*Matrix3::Identity())); + const vector > terms2{ + {0, 2 * I_3x3}, {1, 4 * I_3x3}, {2, 6 * I_3x3}}; // RHS const Vector b2 = (Vector(3) << 2., 4., 6.).finished(); diff --git a/gtsam/linear/tests/testSerializationLinear.cpp b/gtsam/linear/tests/testSerializationLinear.cpp index ee21de364c..da093db977 100644 --- a/gtsam/linear/tests/testSerializationLinear.cpp +++ b/gtsam/linear/tests/testSerializationLinear.cpp @@ -22,9 +22,6 @@ #include #include -#include -using namespace boost::assign; - #include #include @@ -228,13 +225,13 @@ TEST(Serialization, gaussian_bayes_net) { /* 
************************************************************************* */ TEST (Serialization, gaussian_bayes_tree) { const Key x1=1, x2=2, x3=3, x4=4; - const Ordering chainOrdering = Ordering(list_of(x2)(x1)(x3)(x4)); + const Ordering chainOrdering {x2, x1, x3, x4}; const SharedDiagonal chainNoise = noiseModel::Isotropic::Sigma(1, 0.5); - const GaussianFactorGraph chain = list_of - (JacobianFactor(x2, (Matrix(1, 1) << 1.).finished(), x1, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise)) - (JacobianFactor(x2, (Matrix(1, 1) << 1.).finished(), x3, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise)) - (JacobianFactor(x3, (Matrix(1, 1) << 1.).finished(), x4, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise)) - (JacobianFactor(x4, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise)); + const GaussianFactorGraph chain = { + boost::make_shared(x2, (Matrix(1, 1) << 1.).finished(), x1, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise), + boost::make_shared(x2, (Matrix(1, 1) << 1.).finished(), x3, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise), + boost::make_shared(x3, (Matrix(1, 1) << 1.).finished(), x4, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise), + boost::make_shared(x4, (Matrix(1, 1) << 1.).finished(), (Vector(1) << 1.).finished(), chainNoise)}; GaussianBayesTree init = *chain.eliminateMultifrontal(chainOrdering); GaussianBayesTree expected = *chain.eliminateMultifrontal(chainOrdering); diff --git a/gtsam/linear/tests/testSparseEigen.cpp b/gtsam/linear/tests/testSparseEigen.cpp index 225e1dab22..39c4f37e75 100644 --- a/gtsam/linear/tests/testSparseEigen.cpp +++ b/gtsam/linear/tests/testSparseEigen.cpp @@ -21,9 +21,6 @@ #include #include -#include -using boost::assign::list_of; - #include #include @@ -49,7 +46,7 @@ TEST(SparseEigen, sparseJacobianEigen) { 
EXPECT(assert_equal(gfg.augmentedJacobian(), Matrix(sparseResult))); // Call sparseJacobian with optional ordering... - auto ordering = Ordering(list_of(x45)(x123)); + const Ordering ordering{x45, x123}; // Eigen Sparse with optional ordering EXPECT(assert_equal(gfg.augmentedJacobian(ordering), diff --git a/gtsam/linear/tests/testVectorValues.cpp b/gtsam/linear/tests/testVectorValues.cpp index 521cc2289e..49fc56d193 100644 --- a/gtsam/linear/tests/testVectorValues.cpp +++ b/gtsam/linear/tests/testVectorValues.cpp @@ -21,14 +21,11 @@ #include -#include -#include #include #include using namespace std; -using namespace boost::assign; using boost::adaptors::map_keys; using namespace gtsam; @@ -73,6 +70,22 @@ TEST(VectorValues, basics) CHECK_EXCEPTION(actual.dim(3), out_of_range); } +/* ************************************************************************* */ + +static const VectorValues kExample = {{99, Vector2(2, 3)}}; + +// Check insert +TEST(VectorValues, Insert) { + VectorValues actual; + EXPECT(assert_equal(kExample, actual.insert(kExample))); +} + +// Check update. +TEST(VectorValues, Update) { + VectorValues actual(kExample); + EXPECT(assert_equal(kExample, actual.update(kExample))); +} + /* ************************************************************************* */ TEST(VectorValues, combine) { diff --git a/gtsam/mainpage.dox b/gtsam/mainpage.dox index 3c5342ca50..65290e0c93 100644 --- a/gtsam/mainpage.dox +++ b/gtsam/mainpage.dox @@ -22,7 +22,7 @@ To use GTSAM to solve your own problems, you will often have to create new facto -# The number of variables your factor involves is unknown at compile time - derive from NoiseModelFactor and implement NoiseModelFactor::unwhitenedError() - This is a factor expressing the sum-of-squares error between a measurement \f$ z \f$ and a measurement prediction function \f$ h(x) \f$, on which the errors are expected to follow some distribution specified by a noise model (see noiseModel). 
--# The number of variables your factor involves is known at compile time and is between 1 and 6 - derive from NoiseModelFactor1, NoiseModelFactor2, NoiseModelFactor3, NoiseModelFactor4, NoiseModelFactor5, or NoiseModelFactor6, and implement \c evaluateError() +-# The number of variables your factor involves is known at compile time, derive from NoiseModelFactorN (where T1, T2, ... are the types of the variables, e.g. double, Vector, Pose3) and implement \c evaluateError(). - This factor expresses the same sum-of-squares error with a noise model, but makes the implementation task slightly easier than with %NoiseModelFactor. -# Derive from NonlinearFactor - This is more advanced and allows creating factors without an explicit noise model, or that linearize to HessianFactor instead of JacobianFactor. diff --git a/gtsam/navigation/AHRSFactor.cpp b/gtsam/navigation/AHRSFactor.cpp index f4db42d0fd..afaaee1d4e 100644 --- a/gtsam/navigation/AHRSFactor.cpp +++ b/gtsam/navigation/AHRSFactor.cpp @@ -105,8 +105,8 @@ gtsam::NonlinearFactor::shared_ptr AHRSFactor::clone() const { //------------------------------------------------------------------------------ void AHRSFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "AHRSFactor(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << ","; + cout << s << "AHRSFactor(" << keyFormatter(this->key<1>()) << "," + << keyFormatter(this->key<2>()) << "," << keyFormatter(this->key<3>()) << ","; _PIM_.print(" preintegrated measurements:"); noiseModel_->print(" noise model: "); } diff --git a/gtsam/navigation/AHRSFactor.h b/gtsam/navigation/AHRSFactor.h index c7d82975a5..87fdd2e918 100644 --- a/gtsam/navigation/AHRSFactor.h +++ b/gtsam/navigation/AHRSFactor.h @@ -128,10 +128,10 @@ class GTSAM_EXPORT PreintegratedAhrsMeasurements : public PreintegratedRotation } }; -class GTSAM_EXPORT AHRSFactor: public NoiseModelFactor3 { +class GTSAM_EXPORT 
AHRSFactor: public NoiseModelFactorN { typedef AHRSFactor This; - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; PreintegratedAhrsMeasurements _PIM_; @@ -212,6 +212,7 @@ class GTSAM_EXPORT AHRSFactor: public NoiseModelFactor3 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor3 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor3", boost::serialization::base_object(*this)); diff --git a/gtsam/navigation/AttitudeFactor.h b/gtsam/navigation/AttitudeFactor.h index e8da541b1d..93495f227a 100644 --- a/gtsam/navigation/AttitudeFactor.h +++ b/gtsam/navigation/AttitudeFactor.h @@ -76,9 +76,9 @@ class AttitudeFactor { * Version of AttitudeFactor for Rot3 * @ingroup navigation */ -class GTSAM_EXPORT Rot3AttitudeFactor: public NoiseModelFactor1, public AttitudeFactor { +class GTSAM_EXPORT Rot3AttitudeFactor: public NoiseModelFactorN, public AttitudeFactor { - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; public: @@ -132,6 +132,7 @@ class GTSAM_EXPORT Rot3AttitudeFactor: public NoiseModelFactor1, public At friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); ar & boost::serialization::make_nvp("AttitudeFactor", @@ -149,10 +150,10 @@ template<> struct traits : public Testable, +class GTSAM_EXPORT Pose3AttitudeFactor: public NoiseModelFactorN, public AttitudeFactor { - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; public: @@ -212,6 +213,7 @@ class GTSAM_EXPORT Pose3AttitudeFactor: public NoiseModelFactor1, friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 
instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); ar & boost::serialization::make_nvp("AttitudeFactor", diff --git a/gtsam/navigation/BarometricFactor.cpp b/gtsam/navigation/BarometricFactor.cpp index 2f0ff7436d..2e87986aec 100644 --- a/gtsam/navigation/BarometricFactor.cpp +++ b/gtsam/navigation/BarometricFactor.cpp @@ -26,8 +26,8 @@ namespace gtsam { void BarometricFactor::print(const string& s, const KeyFormatter& keyFormatter) const { cout << (s.empty() ? "" : s + " ") << "Barometric Factor on " - << keyFormatter(key1()) << "Barometric Bias on " - << keyFormatter(key2()) << "\n"; + << keyFormatter(key<1>()) << "Barometric Bias on " + << keyFormatter(key<2>()) << "\n"; cout << " Baro measurement: " << nT_ << "\n"; noiseModel_->print(" noise model: "); diff --git a/gtsam/navigation/BarometricFactor.h b/gtsam/navigation/BarometricFactor.h index b570049a5e..2d793c475a 100644 --- a/gtsam/navigation/BarometricFactor.h +++ b/gtsam/navigation/BarometricFactor.h @@ -31,9 +31,9 @@ namespace gtsam { * https://www.grc.nasa.gov/www/k-12/airplane/atmosmet.html * @ingroup navigation */ -class GTSAM_EXPORT BarometricFactor : public NoiseModelFactor2 { +class GTSAM_EXPORT BarometricFactor : public NoiseModelFactorN { private: - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; double nT_; ///< Height Measurement based on a standard atmosphere @@ -99,6 +99,7 @@ class GTSAM_EXPORT BarometricFactor : public NoiseModelFactor2 { friend class boost::serialization::access; template void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar& boost::serialization::make_nvp( "NoiseModelFactor1", boost::serialization::base_object(*this)); diff --git a/gtsam/navigation/CombinedImuFactor.cpp b/gtsam/navigation/CombinedImuFactor.cpp index 8d3a7dd315..38b3dc763e 100644 --- 
a/gtsam/navigation/CombinedImuFactor.cpp +++ b/gtsam/navigation/CombinedImuFactor.cpp @@ -204,9 +204,9 @@ gtsam::NonlinearFactor::shared_ptr CombinedImuFactor::clone() const { void CombinedImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { cout << (s.empty() ? s : s + "\n") << "CombinedImuFactor(" - << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," - << keyFormatter(this->key3()) << "," << keyFormatter(this->key4()) << "," - << keyFormatter(this->key5()) << "," << keyFormatter(this->key6()) + << keyFormatter(this->key<1>()) << "," << keyFormatter(this->key<2>()) << "," + << keyFormatter(this->key<3>()) << "," << keyFormatter(this->key<4>()) << "," + << keyFormatter(this->key<5>()) << "," << keyFormatter(this->key<6>()) << ")\n"; _PIM_.print(" preintegrated measurements:"); this->noiseModel_->print(" noise model: "); diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index 5591bb357d..3448e77948 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -255,14 +255,14 @@ class GTSAM_EXPORT PreintegratedCombinedMeasurements : public PreintegrationType * * @ingroup navigation */ -class GTSAM_EXPORT CombinedImuFactor: public NoiseModelFactor6 { public: private: typedef CombinedImuFactor This; - typedef NoiseModelFactor6 Base; PreintegratedCombinedMeasurements _PIM_; @@ -334,7 +334,9 @@ class GTSAM_EXPORT CombinedImuFactor: public NoiseModelFactor6 void serialize(ARCHIVE& ar, const unsigned int /*version*/) { - ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(NoiseModelFactor6); + // NoiseModelFactor6 instead of NoiseModelFactorN for backward compatibility + ar& boost::serialization::make_nvp( + "NoiseModelFactor6", boost::serialization::base_object(*this)); ar& BOOST_SERIALIZATION_NVP(_PIM_); } diff --git a/gtsam/navigation/ConstantVelocityFactor.h b/gtsam/navigation/ConstantVelocityFactor.h index 6ab4c2f025..4db2b82c0c 100644 --- 
a/gtsam/navigation/ConstantVelocityFactor.h +++ b/gtsam/navigation/ConstantVelocityFactor.h @@ -26,15 +26,15 @@ namespace gtsam { * Binary factor for applying a constant velocity model to a moving body represented as a NavState. * The only measurement is dt, the time delta between the states. */ -class ConstantVelocityFactor : public NoiseModelFactor2 { +class ConstantVelocityFactor : public NoiseModelFactorN { double dt_; public: - using Base = NoiseModelFactor2; + using Base = NoiseModelFactorN; public: ConstantVelocityFactor(Key i, Key j, double dt, const SharedNoiseModel &model) - : NoiseModelFactor2(model, i, j), dt_(dt) {} + : NoiseModelFactorN(model, i, j), dt_(dt) {} ~ConstantVelocityFactor() override{}; /** diff --git a/gtsam/navigation/GPSFactor.h b/gtsam/navigation/GPSFactor.h index 40d0ef22a5..e515d90122 100644 --- a/gtsam/navigation/GPSFactor.h +++ b/gtsam/navigation/GPSFactor.h @@ -32,11 +32,11 @@ namespace gtsam { * See Farrell08book or e.g. http://www.dirsig.org/docs/new/coordinates.html * @ingroup navigation */ -class GTSAM_EXPORT GPSFactor: public NoiseModelFactor1 { +class GTSAM_EXPORT GPSFactor: public NoiseModelFactorN { private: - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; Point3 nT_; ///< Position measurement in cartesian coordinates @@ -99,6 +99,7 @@ class GTSAM_EXPORT GPSFactor: public NoiseModelFactor1 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); @@ -110,11 +111,11 @@ class GTSAM_EXPORT GPSFactor: public NoiseModelFactor1 { * Version of GPSFactor for NavState * @ingroup navigation */ -class GTSAM_EXPORT GPSFactor2: public NoiseModelFactor1 { +class GTSAM_EXPORT GPSFactor2: public NoiseModelFactorN { private: - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN 
Base; Point3 nT_; ///< Position measurement in cartesian coordinates @@ -163,6 +164,7 @@ class GTSAM_EXPORT GPSFactor2: public NoiseModelFactor1 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); diff --git a/gtsam/navigation/ImuFactor.cpp b/gtsam/navigation/ImuFactor.cpp index 9b6affaaf8..1b07991e93 100644 --- a/gtsam/navigation/ImuFactor.cpp +++ b/gtsam/navigation/ImuFactor.cpp @@ -133,9 +133,9 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor& f) { //------------------------------------------------------------------------------ void ImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << (s.empty() ? s : s + "\n") << "ImuFactor(" << keyFormatter(this->key1()) - << "," << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) - << "," << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) + cout << (s.empty() ? s : s + "\n") << "ImuFactor(" << keyFormatter(this->key<1>()) + << "," << keyFormatter(this->key<2>()) << "," << keyFormatter(this->key<3>()) + << "," << keyFormatter(this->key<4>()) << "," << keyFormatter(this->key<5>()) << ")\n"; cout << *this << endl; } @@ -184,22 +184,22 @@ PreintegratedImuMeasurements ImuFactor::Merge( ImuFactor::shared_ptr ImuFactor::Merge(const shared_ptr& f01, const shared_ptr& f12) { // IMU bias keys must be the same. - if (f01->key5() != f12->key5()) + if (f01->key<5>() != f12->key<5>()) throw std::domain_error("ImuFactor::Merge: IMU bias keys must be the same"); // expect intermediate pose, velocity keys to matchup. 
- if (f01->key3() != f12->key1() || f01->key4() != f12->key2()) + if (f01->key<3>() != f12->key<1>() || f01->key<4>() != f12->key<2>()) throw std::domain_error( "ImuFactor::Merge: intermediate pose, velocity keys need to match up"); // return new factor auto pim02 = Merge(f01->preintegratedMeasurements(), f12->preintegratedMeasurements()); - return boost::make_shared(f01->key1(), // P0 - f01->key2(), // V0 - f12->key3(), // P2 - f12->key4(), // V2 - f01->key5(), // B + return boost::make_shared(f01->key<1>(), // P0 + f01->key<2>(), // V0 + f12->key<3>(), // P2 + f12->key<4>(), // V2 + f01->key<5>(), // B pim02); } #endif @@ -230,8 +230,8 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor2& f) { void ImuFactor2::print(const string& s, const KeyFormatter& keyFormatter) const { cout << (s.empty() ? s : s + "\n") << "ImuFactor2(" - << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," - << keyFormatter(this->key3()) << ")\n"; + << keyFormatter(this->key<1>()) << "," << keyFormatter(this->key<2>()) << "," + << keyFormatter(this->key<3>()) << ")\n"; cout << *this << endl; } diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index bffe3e6455..feae1eb140 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -168,12 +168,12 @@ class GTSAM_EXPORT PreintegratedImuMeasurements: public PreintegrationType { * * @ingroup navigation */ -class GTSAM_EXPORT ImuFactor: public NoiseModelFactor5 { private: typedef ImuFactor This; - typedef NoiseModelFactor5 Base; PreintegratedImuMeasurements _PIM_; @@ -248,6 +248,7 @@ class GTSAM_EXPORT ImuFactor: public NoiseModelFactor5 void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor5 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor5", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(_PIM_); @@ -259,11 +260,11 @@ class GTSAM_EXPORT ImuFactor: public 
NoiseModelFactor5 { +class GTSAM_EXPORT ImuFactor2 : public NoiseModelFactorN { private: typedef ImuFactor2 This; - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; PreintegratedImuMeasurements _PIM_; @@ -316,6 +317,7 @@ class GTSAM_EXPORT ImuFactor2 : public NoiseModelFactor3 void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor3 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor3", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(_PIM_); diff --git a/gtsam/navigation/MagFactor.h b/gtsam/navigation/MagFactor.h index 895ac6512c..9a718a5e1a 100644 --- a/gtsam/navigation/MagFactor.h +++ b/gtsam/navigation/MagFactor.h @@ -30,7 +30,7 @@ namespace gtsam { * and assumes scale, direction, and the bias are given. * Rotation is around negative Z axis, i.e. positive is yaw to right! */ -class MagFactor: public NoiseModelFactor1 { +class MagFactor: public NoiseModelFactorN { const Point3 measured_; ///< The measured magnetometer values const Point3 nM_; ///< Local magnetic field (mag output units) @@ -50,7 +50,7 @@ class MagFactor: public NoiseModelFactor1 { MagFactor(Key key, const Point3& measured, double scale, const Unit3& direction, const Point3& bias, const SharedNoiseModel& model) : - NoiseModelFactor1(model, key), // + NoiseModelFactorN(model, key), // measured_(measured), nM_(scale * direction), bias_(bias) { } @@ -87,7 +87,7 @@ class MagFactor: public NoiseModelFactor1 { * This version uses model measured bM = scale * bRn * direction + bias * and assumes scale, direction, and the bias are given */ -class MagFactor1: public NoiseModelFactor1 { +class MagFactor1: public NoiseModelFactorN { const Point3 measured_; ///< The measured magnetometer values const Point3 nM_; ///< Local magnetic field (mag output units) @@ -99,7 +99,7 @@ class MagFactor1: public NoiseModelFactor1 { MagFactor1(Key key, const Point3& measured, double scale, const 
Unit3& direction, const Point3& bias, const SharedNoiseModel& model) : - NoiseModelFactor1(model, key), // + NoiseModelFactorN(model, key), // measured_(measured), nM_(scale * direction), bias_(bias) { } @@ -125,7 +125,7 @@ class MagFactor1: public NoiseModelFactor1 { * This version uses model measured bM = bRn * nM + bias * and optimizes for both nM and the bias, where nM is in units defined by magnetometer */ -class MagFactor2: public NoiseModelFactor2 { +class MagFactor2: public NoiseModelFactorN { const Point3 measured_; ///< The measured magnetometer values const Rot3 bRn_; ///< The assumed known rotation from nav to body @@ -135,7 +135,7 @@ class MagFactor2: public NoiseModelFactor2 { /** Constructor */ MagFactor2(Key key1, Key key2, const Point3& measured, const Rot3& nRb, const SharedNoiseModel& model) : - NoiseModelFactor2(model, key1, key2), // + NoiseModelFactorN(model, key1, key2), // measured_(measured), bRn_(nRb.inverse()) { } @@ -166,7 +166,7 @@ class MagFactor2: public NoiseModelFactor2 { * This version uses model measured bM = scale * bRn * direction + bias * and optimizes for both scale, direction, and the bias. */ -class MagFactor3: public NoiseModelFactor3 { +class MagFactor3: public NoiseModelFactorN { const Point3 measured_; ///< The measured magnetometer values const Rot3 bRn_; ///< The assumed known rotation from nav to body @@ -176,7 +176,7 @@ class MagFactor3: public NoiseModelFactor3 { /** Constructor */ MagFactor3(Key key1, Key key2, Key key3, const Point3& measured, const Rot3& nRb, const SharedNoiseModel& model) : - NoiseModelFactor3(model, key1, key2, key3), // + NoiseModelFactorN(model, key1, key2, key3), // measured_(measured), bRn_(nRb.inverse()) { } diff --git a/gtsam/navigation/MagPoseFactor.h b/gtsam/navigation/MagPoseFactor.h index da2a54ce99..c817e22b43 100644 --- a/gtsam/navigation/MagPoseFactor.h +++ b/gtsam/navigation/MagPoseFactor.h @@ -25,10 +25,10 @@ namespace gtsam { * expressed in the sensor frame. 
*/ template -class MagPoseFactor: public NoiseModelFactor1 { +class MagPoseFactor: public NoiseModelFactorN { private: using This = MagPoseFactor; - using Base = NoiseModelFactor1; + using Base = NoiseModelFactorN; using Point = typename POSE::Translation; ///< Could be a Vector2 or Vector3 depending on POSE. using Rot = typename POSE::Rotation; @@ -129,6 +129,7 @@ class MagPoseFactor: public NoiseModelFactor1 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(measured_); diff --git a/gtsam/navigation/ScenarioRunner.cpp b/gtsam/navigation/ScenarioRunner.cpp index 9d3e258de9..19d241c955 100644 --- a/gtsam/navigation/ScenarioRunner.cpp +++ b/gtsam/navigation/ScenarioRunner.cpp @@ -18,11 +18,9 @@ #include #include -#include #include using namespace std; -using namespace boost::assign; namespace gtsam { diff --git a/gtsam/navigation/navigation.i b/gtsam/navigation/navigation.i index 731cf3807c..7bbef9fc5a 100644 --- a/gtsam/navigation/navigation.i +++ b/gtsam/navigation/navigation.i @@ -216,7 +216,13 @@ virtual class CombinedImuFactor: gtsam::NonlinearFactor { #include class PreintegratedAhrsMeasurements { // Standard Constructor - PreintegratedAhrsMeasurements(Vector bias, Matrix measuredOmegaCovariance); + PreintegratedAhrsMeasurements(const gtsam::PreintegrationParams* params, + const Vector& biasHat); + PreintegratedAhrsMeasurements(const gtsam::PreintegrationParams* p, + const Vector& bias_hat, double deltaTij, + const gtsam::Rot3& deltaRij, + const Matrix& delRdelBiasOmega, + const Matrix& preint_meas_cov); PreintegratedAhrsMeasurements(const gtsam::PreintegratedAhrsMeasurements& rhs); // Testable diff --git a/gtsam/nonlinear/Expression-inl.h b/gtsam/nonlinear/Expression-inl.h index 
b2ef6f0557..620e6afcae 100644 --- a/gtsam/nonlinear/Expression-inl.h +++ b/gtsam/nonlinear/Expression-inl.h @@ -19,6 +19,13 @@ #pragma once +// The MSVC compiler workaround for the unsupported variable length array +// utilizes the std::unique_ptr<> custom deleter. +// See Expression::valueAndJacobianMap() below. +#ifdef _MSC_VER +#include +#endif + #include #include @@ -208,7 +215,10 @@ T Expression::valueAndJacobianMap(const Values& values, // allocated on Visual Studio. For more information see the issue below // https://bitbucket.org/gtborg/gtsam/issue/178/vlas-unsupported-in-visual-studio #ifdef _MSC_VER - auto traceStorage = static_cast(_aligned_malloc(size, internal::TraceAlignment)); + std::unique_ptr traceStorageDeleter( + _aligned_malloc(size, internal::TraceAlignment), + [](void *ptr){ _aligned_free(ptr); }); + auto traceStorage = static_cast(traceStorageDeleter.get()); #else internal::ExecutionTraceStorage traceStorage[size]; #endif @@ -217,10 +227,6 @@ T Expression::valueAndJacobianMap(const Values& values, T value(this->traceExecution(values, trace, traceStorage)); trace.startReverseAD1(jacobians); -#ifdef _MSC_VER - _aligned_free(traceStorage); -#endif - return value; } diff --git a/gtsam/nonlinear/ExtendedKalmanFilter.h b/gtsam/nonlinear/ExtendedKalmanFilter.h index dc51de7bbb..d76a6ea7e6 100644 --- a/gtsam/nonlinear/ExtendedKalmanFilter.h +++ b/gtsam/nonlinear/ExtendedKalmanFilter.h @@ -53,8 +53,8 @@ class ExtendedKalmanFilter { #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 //@deprecated: any NoiseModelFactor will do, as long as they have the right keys - typedef NoiseModelFactor2 MotionFactor; - typedef NoiseModelFactor1 MeasurementFactor; + typedef NoiseModelFactorN MotionFactor; + typedef NoiseModelFactorN MeasurementFactor; #endif protected: diff --git a/gtsam/nonlinear/FunctorizedFactor.h b/gtsam/nonlinear/FunctorizedFactor.h index 394b22b6b7..1dc397c686 100644 --- a/gtsam/nonlinear/FunctorizedFactor.h +++ b/gtsam/nonlinear/FunctorizedFactor.h @@ 
-56,9 +56,9 @@ namespace gtsam { * MultiplyFunctor(multiplier)); */ template -class FunctorizedFactor : public NoiseModelFactor1 { +class FunctorizedFactor : public NoiseModelFactorN { private: - using Base = NoiseModelFactor1; + using Base = NoiseModelFactorN; R measured_; ///< value that is compared with functor return value SharedNoiseModel noiseModel_; ///< noise model @@ -101,7 +101,7 @@ class FunctorizedFactor : public NoiseModelFactor1 { const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); std::cout << s << (s != "" ? " " : "") << "FunctorizedFactor(" - << keyFormatter(this->key()) << ")" << std::endl; + << keyFormatter(this->key1()) << ")" << std::endl; traits::Print(measured_, " measurement: "); std::cout << " noise model sigmas: " << noiseModel_->sigmas().transpose() << std::endl; @@ -120,6 +120,7 @@ class FunctorizedFactor : public NoiseModelFactor1 { friend class boost::serialization::access; template void serialize(ARCHIVE &ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar &boost::serialization::make_nvp( "NoiseModelFactor1", boost::serialization::base_object(*this)); ar &BOOST_SERIALIZATION_NVP(measured_); @@ -155,9 +156,9 @@ FunctorizedFactor MakeFunctorizedFactor(Key key, const R &z, * @param T2: The second argument type for the functor. 
*/ template -class FunctorizedFactor2 : public NoiseModelFactor2 { +class FunctorizedFactor2 : public NoiseModelFactorN { private: - using Base = NoiseModelFactor2; + using Base = NoiseModelFactorN; R measured_; ///< value that is compared with functor return value SharedNoiseModel noiseModel_; ///< noise model @@ -227,6 +228,7 @@ class FunctorizedFactor2 : public NoiseModelFactor2 { friend class boost::serialization::access; template void serialize(ARCHIVE &ar, const unsigned int /*version*/) { + // NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar &boost::serialization::make_nvp( "NoiseModelFactor2", boost::serialization::base_object(*this)); ar &BOOST_SERIALIZATION_NVP(measured_); diff --git a/gtsam/nonlinear/ISAM2-impl.h b/gtsam/nonlinear/ISAM2-impl.h index eb2285b281..7b76305820 100644 --- a/gtsam/nonlinear/ISAM2-impl.h +++ b/gtsam/nonlinear/ISAM2-impl.h @@ -436,36 +436,6 @@ struct GTSAM_EXPORT UpdateImpl { } } - /** - * Apply expmap to the given values, but only for indices appearing in - * \c mask. Values are expmapped in-place. 
- * \param mask Mask on linear indices, only \c true entries are expmapped - */ - static void ExpmapMasked(const VectorValues& delta, const KeySet& mask, - Values* theta) { - gttic(ExpmapMasked); - assert(theta->size() == delta.size()); - Values::iterator key_value; - VectorValues::const_iterator key_delta; -#ifdef GTSAM_USE_TBB - for (key_value = theta->begin(); key_value != theta->end(); ++key_value) { - key_delta = delta.find(key_value->key); -#else - for (key_value = theta->begin(), key_delta = delta.begin(); - key_value != theta->end(); ++key_value, ++key_delta) { - assert(key_value->key == key_delta->first); -#endif - Key var = key_value->key; - assert(static_cast(delta[var].size()) == key_value->value.dim()); - assert(delta[var].allFinite()); - if (mask.exists(var)) { - Value* retracted = key_value->value.retract_(delta[var]); - key_value->value = *retracted; - retracted->deallocate_(); - } - } - } - // Linearize new factors void linearizeNewFactors(const NonlinearFactorGraph& newFactors, const Values& theta, size_t numNonlinearFactors, diff --git a/gtsam/nonlinear/ISAM2.cpp b/gtsam/nonlinear/ISAM2.cpp index f56b23777c..1c15469ccb 100644 --- a/gtsam/nonlinear/ISAM2.cpp +++ b/gtsam/nonlinear/ISAM2.cpp @@ -454,7 +454,7 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors, update.findFluid(roots_, relinKeys, &result.markedKeys, result.details()); // 6. Update linearization point for marked variables: // \Theta_{J}:=\Theta_{J}+\Delta_{J}. 
- UpdateImpl::ExpmapMasked(delta_, relinKeys, &theta_); + theta_.retractMasked(delta_, relinKeys); } result.variablesRelinearized = result.markedKeys.size(); } diff --git a/gtsam/nonlinear/ISAM2Clique.cpp b/gtsam/nonlinear/ISAM2Clique.cpp index f7a1c14a03..b3dc49342b 100644 --- a/gtsam/nonlinear/ISAM2Clique.cpp +++ b/gtsam/nonlinear/ISAM2Clique.cpp @@ -245,7 +245,7 @@ bool ISAM2Clique::optimizeWildfireNode(const KeySet& replaced, double threshold, // Back-substitute fastBackSubstitute(delta); - count += conditional_->nrFrontals(); + *count += conditional_->nrFrontals(); if (valuesChanged(replaced, originalValues, *delta, threshold)) { markFrontalsAsChanged(changed); diff --git a/gtsam/nonlinear/NonlinearEquality.h b/gtsam/nonlinear/NonlinearEquality.h index 43d30254ef..d1aa58b27a 100644 --- a/gtsam/nonlinear/NonlinearEquality.h +++ b/gtsam/nonlinear/NonlinearEquality.h @@ -42,7 +42,7 @@ namespace gtsam { * \nosubgrouping */ template -class NonlinearEquality: public NoiseModelFactor1 { +class NonlinearEquality: public NoiseModelFactorN { public: typedef VALUE T; @@ -62,7 +62,7 @@ class NonlinearEquality: public NoiseModelFactor1 { using This = NonlinearEquality; // typedef to base class - using Base = NoiseModelFactor1; + using Base = NoiseModelFactorN; public: @@ -184,6 +184,7 @@ class NonlinearEquality: public NoiseModelFactor1 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); @@ -203,13 +204,13 @@ struct traits> : Testable> {}; * Simple unary equality constraint - fixes a value for a variable */ template -class NonlinearEquality1: public NoiseModelFactor1 { +class NonlinearEquality1: public NoiseModelFactorN { public: typedef VALUE X; protected: - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; typedef 
NonlinearEquality1 This; /// Default constructor to allow for serialization @@ -272,6 +273,7 @@ class NonlinearEquality1: public NoiseModelFactor1 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); @@ -290,9 +292,9 @@ struct traits > * be the same. */ template -class NonlinearEquality2 : public NoiseModelFactor2 { +class NonlinearEquality2 : public NoiseModelFactorN { protected: - using Base = NoiseModelFactor2; + using Base = NoiseModelFactorN; using This = NonlinearEquality2; GTSAM_CONCEPT_MANIFOLD_TYPE(T) @@ -337,6 +339,7 @@ class NonlinearEquality2 : public NoiseModelFactor2 { friend class boost::serialization::access; template void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + // NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar& boost::serialization::make_nvp( "NoiseModelFactor2", boost::serialization::base_object(*this)); } diff --git a/gtsam/nonlinear/NonlinearFactor.cpp b/gtsam/nonlinear/NonlinearFactor.cpp index debff54acd..b669be4723 100644 --- a/gtsam/nonlinear/NonlinearFactor.cpp +++ b/gtsam/nonlinear/NonlinearFactor.cpp @@ -16,12 +16,23 @@ * @author Richard Roberts */ +#include #include #include #include namespace gtsam { +/* ************************************************************************* */ +double NonlinearFactor::error(const Values& c) const { + throw std::runtime_error("NonlinearFactor::error is not implemented"); +} + +/* ************************************************************************* */ +double NonlinearFactor::error(const HybridValues& c) const { + return this->error(c.nonlinear()); +} + /* ************************************************************************* */ void NonlinearFactor::print(const std::string& s, const KeyFormatter& 
keyFormatter) const { diff --git a/gtsam/nonlinear/NonlinearFactor.h b/gtsam/nonlinear/NonlinearFactor.h index 7fafd95dfa..9ccd0246f7 100644 --- a/gtsam/nonlinear/NonlinearFactor.h +++ b/gtsam/nonlinear/NonlinearFactor.h @@ -14,6 +14,7 @@ * @brief Non-linear factor base classes * @author Frank Dellaert * @author Richard Roberts + * @author Gerry Chen */ // \callgraph @@ -25,14 +26,12 @@ #include #include #include +#include // boost::index_sequence #include -#include namespace gtsam { -using boost::assign::cref_list_of; - /* ************************************************************************* */ /** @@ -75,6 +74,7 @@ class GTSAM_EXPORT NonlinearFactor: public Factor { /** Check if two factors are equal */ virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const; + /// @} /// @name Standard Interface /// @{ @@ -82,13 +82,25 @@ class GTSAM_EXPORT NonlinearFactor: public Factor { /** Destructor */ virtual ~NonlinearFactor() {} + /** + * In nonlinear factors, the error function returns the negative log-likelihood + * as a non-linear function of the values in a \class Values object. + * + * The idea is that Gaussian factors have a quadratic error function that locally + * approximates the negative log-likelihood, and are obtained by \b linearizing + * the nonlinear error function at a given linearization. + * + * The derived class, \class NoiseModelFactor, adds a noise model to the factor, + * and calculates the error by asking the user to implement the method + * \code double evaluateError(const Values& c) const \endcode. + */ + virtual double error(const Values& c) const; /** - * Calculate the error of the factor - * This is typically equal to log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/sigma^2 \f$ in case of Gaussian. - * You can override this for systems with unusual noise models. + * The Factor::error simply extracts the \class Values from the + * \class HybridValues and calculates the error. 
*/ - virtual double error(const Values& c) const = 0; + double error(const HybridValues& c) const override; /** get the dimension of the factor (number of rows on linearization) */ virtual size_t dim() const = 0; @@ -270,70 +282,268 @@ class GTSAM_EXPORT NoiseModelFactor: public NonlinearFactor { }; // \class NoiseModelFactor - /* ************************************************************************* */ +namespace detail { +/** Convenience base class to add aliases `X1`, `X2`, ..., `X6` -> ValueType. + * Usage example: + * ``` + * class MyFactor : public NoiseModelFactorN, + * public NoiseModelFactorAliases { + * // class implementation ... + * }; + * + * // MyFactor::X1 == Pose3 + * // MyFactor::X2 == Point3 + * ``` + */ +template +struct NoiseModelFactorAliases {}; +template +struct NoiseModelFactorAliases { + using X = T1; + using X1 = T1; +}; +template +struct NoiseModelFactorAliases { + using X1 = T1; + using X2 = T2; +}; +template +struct NoiseModelFactorAliases { + using X1 = T1; + using X2 = T2; + using X3 = T3; +}; +template +struct NoiseModelFactorAliases { + using X1 = T1; + using X2 = T2; + using X3 = T3; + using X4 = T4; +}; +template +struct NoiseModelFactorAliases { + using X1 = T1; + using X2 = T2; + using X3 = T3; + using X4 = T4; + using X5 = T5; +}; +template +struct NoiseModelFactorAliases { + using X1 = T1; + using X2 = T2; + using X3 = T3; + using X4 = T4; + using X5 = T5; + using X6 = T6; +}; +} // namespace detail +/* ************************************************************************* */ /** - * A convenient base class for creating your own NoiseModelFactor with 1 - * variable. To derive from this class, implement evaluateError(). + * A convenient base class for creating your own NoiseModelFactor + * with n variables. To derive from this class, implement evaluateError(). 
+ * + * For example, a 2-way factor that computes the difference in x-translation + * between a Pose3 and Point3 could be implemented like so: + * + * ~~~~~~~~~~~~~~~~~~~~{.cpp} + * class MyFactor : public NoiseModelFactorN { + * public: + * using Base = NoiseModelFactorN; + * + * MyFactor(Key pose_key, Key point_key, const SharedNoiseModel& noiseModel) + * : Base(noiseModel, pose_key, point_key) {} + * + * Vector evaluateError( + * const Pose3& T, const Point3& p, + * boost::optional H_T = boost::none, + * boost::optional H_p = boost::none) const override { + * Matrix36 t_H_T; // partial derivative of translation w.r.t. pose T + * + * // Only compute t_H_T if needed: + * Point3 t = T.translation(H_T ? &t_H_T : 0); + * double a = t(0); // a_H_t = [1, 0, 0] + * double b = p(0); // b_H_p = [1, 0, 0] + * double error = a - b; // H_a = 1, H_b = -1 + * + * // H_T = H_a * a_H_t * t_H_T = the first row of t_H_T + * if (H_T) *H_T = (Matrix(1, 6) << t_H_T.row(0)).finished(); + * // H_p = H_b * b_H_p = -1 * [1, 0, 0] + * if (H_p) *H_p = (Matrix(1, 3) << -1., 0., 0.).finished(); * - * Templated on a values structure type. The values structures are typically - * more general than just vectors, e.g., Rot3 or Pose3, - * which are objects in non-linear manifolds (Lie groups). + * return Vector1(error); + * } + * }; + * + * // Unit Test + * TEST(NonlinearFactor, MyFactor) { + * MyFactor f(X(1), X(2), noiseModel::Unit::Create(1)); + * EXPECT_DOUBLES_EQUAL(-8., f.evaluateError(Pose3(), Point3(8., 7., 6.))(0), + * 1e-9); + * Values values; + * values.insert(X(1), Pose3(Rot3::RzRyRx(0.1, 0.2, 0.3), Point3(1, 2, 3))); + * values.insert(X(2), Point3(1, 2, 3)); + * EXPECT_CORRECT_FACTOR_JACOBIANS(f, values, 1e-5, 1e-5); + * } + * ~~~~~~~~~~~~~~~~~~~~ + * + * These factors are templated on a values structure type. The values structures + * are typically more general than just vectors, e.g., Rot3 or Pose3, which are + * objects in non-linear manifolds (Lie groups). 
*/ -template -class NoiseModelFactor1: public NoiseModelFactor { +template +class NoiseModelFactorN + : public NoiseModelFactor, + public detail::NoiseModelFactorAliases { + public: + /// N is the number of variables (N-way factor) + enum { N = sizeof...(ValueTypes) }; + + protected: + using Base = NoiseModelFactor; + using This = NoiseModelFactorN; + + /// @name SFINAE aliases + /// @{ -public: + template + using IsConvertible = + typename std::enable_if::value, void>::type; - // typedefs for value types pulled from keys - typedef VALUE X; + template + using IndexIsValid = typename std::enable_if<(I >= 1) && (I <= N), + void>::type; // 1-indexed! -protected: + template + using ContainerElementType = + typename std::decay().begin())>::type; + template + using IsContainerOfKeys = IsConvertible, Key>; - typedef NoiseModelFactor Base; - typedef NoiseModelFactor1 This; + /// @} + + /* Like std::void_t, except produces `boost::optional` instead of + * `void`. Used to expand fixed-type parameter-packs with same length as + * ValueTypes. */ + template + using OptionalMatrix = boost::optional; + + /* Like std::void_t, except produces `Key` instead of `void`. Used to expand + * fixed-type parameter-packs with same length as ValueTypes. */ + template + using KeyType = Key; + + public: + /** + * The type of the I'th template param can be obtained as ValueType. + * I is 1-indexed for backwards compatibility/consistency! So for example, + * ``` + * using Factor = NoiseModelFactorN; + * Factor::ValueType<1> // Pose3 + * Factor::ValueType<2> // Point3 + * // Factor::ValueType<0> // ERROR! Will not compile. + * // Factor::ValueType<3> // ERROR! Will not compile. + * ``` + * + * You can also use the shortcuts `X1`, ..., `X6` which are the same as + * `ValueType<1>`, ..., `ValueType<6>` respectively (see + * detail::NoiseModelFactorAliases). 
+ * + * Note that, if your class is templated AND you want to use `ValueType<1>` + * inside your class, due to dependent types you need the `template` keyword: + * `typename MyFactor::template ValueType<1>`. + */ + template > + using ValueType = + typename std::tuple_element>::type; + + public: -public: /// @name Constructors /// @{ - /** Default constructor for I/O only */ - NoiseModelFactor1() {} - - ~NoiseModelFactor1() override {} + /// Default Constructor for I/O + NoiseModelFactorN() {} - inline Key key() const { return keys_[0]; } + /** + * Constructor. + * Example usage: NoiseModelFactorN(noise, key1, key2, ..., keyN) + * @param noiseModel Shared pointer to noise model. + * @param keys Keys for the variables in this factor, passed in as separate + * arguments. + */ + NoiseModelFactorN(const SharedNoiseModel& noiseModel, + KeyType... keys) + : Base(noiseModel, std::array{keys...}) {} /** - * Constructor - * @param noiseModel shared pointer to noise model - * @param key1 by which to look up X value in Values + * Constructor. + * Example usage: `NoiseModelFactorN(noise, {key1, key2, ..., keyN})` + * Example usage: `NoiseModelFactorN(noise, keys)` where keys is a vector + * @param noiseModel Shared pointer to noise model. + * @param keys A container of keys for the variables in this factor. */ - NoiseModelFactor1(const SharedNoiseModel &noiseModel, Key key1) - : Base(noiseModel, cref_list_of<1>(key1)) {} + template , + typename = IsContainerOfKeys> + NoiseModelFactorN(const SharedNoiseModel& noiseModel, CONTAINER keys) + : Base(noiseModel, keys) { + if (keys.size() != N) { + throw std::invalid_argument( + "NoiseModelFactorN: wrong number of keys given"); + } + } /// @} + + ~NoiseModelFactorN() override {} + + /** Returns a key. Usage: `key()` returns the I'th key. + * I is 1-indexed for backwards compatibility/consistency! 
So for example, + * ``` + * NoiseModelFactorN factor(noise, key1, key2); + * key<1>() // = key1 + * key<2>() // = key2 + * // key<0>() // ERROR! Will not compile + * // key<3>() // ERROR! Will not compile + * ``` + * + * Note that, if your class is templated AND you are trying to call `key<1>` + * inside your class, due to dependent types you need the `template` keyword: + * `this->key1()`. + */ + template + inline Key key() const { + static_assert(I <= N, "Index out of bounds"); + return keys_[I - 1]; + } + /// @name NoiseModelFactor methods /// @{ - /** - * Calls the 1-key specific version of evaluateError below, which is pure - * virtual so must be implemented in the derived class. + /** This implements the `unwhitenedError` virtual function by calling the + * n-key specific version of evaluateError, which is pure virtual so must be + * implemented in the derived class. + * + * Example usage: + * ``` + * gtsam::Values values; + * values.insert(...) // populate values + * std::vector Hs(2); // this will be an optional output argument + * const Vector error = factor.unwhitenedError(values, Hs); + * ``` + * @param[in] x A Values object containing the values of all the variables + * used in this factor + * @param[out] H A vector of (dynamic) matrices whose size should be equal to + * n. The Jacobians w.r.t. each variable will be output in this parameter. */ Vector unwhitenedError( - const Values &x, - boost::optional &> H = boost::none) const override { - if (this->active(x)) { - const X &x1 = x.at(keys_[0]); - if (H) { - return evaluateError(x1, (*H)[0]); - } else { - return evaluateError(x1); - } - } else { - return Vector::Zero(this->dim()); - } + const Values& x, + boost::optional&> H = boost::none) const override { + return unwhitenedError(boost::mp11::index_sequence_for{}, x, + H); } /// @} @@ -341,434 +551,133 @@ class NoiseModelFactor1: public NoiseModelFactor { /// @{ /** - * Override this method to finish implementing a unary factor. 
- * If the optional Matrix reference argument is specified, it should compute - * both the function evaluation and its derivative in X. + * Override `evaluateError` to finish implementing an n-way factor. + * + * Both the `x` and `H` arguments are written here as parameter packs, but + * when overriding this method, you probably want to explicitly write them + * out. For example, for a 2-way factor with variable types Pose3 and Point3, + * you should implement: + * ``` + * Vector evaluateError( + * const Pose3& x1, const Point3& x2, + * boost::optional H1 = boost::none, + * boost::optional H2 = boost::none) const override { ... } + * ``` + * + * If any of the optional Matrix reference arguments are specified, it should + * compute both the function evaluation and its derivative(s) in the requested + * variables. + * + * @param x The values of the variables to evaluate the error for. Passed in + * as separate arguments. + * @param[out] H The Jacobian with respect to each variable (optional). */ - virtual Vector - evaluateError(const X &x, - boost::optional H = boost::none) const = 0; + virtual Vector evaluateError(const ValueTypes&... x, + OptionalMatrix... H) const = 0; /// @} -private: - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & boost::serialization::make_nvp("NoiseModelFactor", - boost::serialization::base_object(*this)); - } -};// \class NoiseModelFactor1 - - -/* ************************************************************************* */ -/** A convenient base class for creating your own NoiseModelFactor with 2 - * variables. To derive from this class, implement evaluateError(). 
*/ -template -class NoiseModelFactor2: public NoiseModelFactor { - -public: - - // typedefs for value types pulled from keys - typedef VALUE1 X1; - typedef VALUE2 X2; - -protected: - - typedef NoiseModelFactor Base; - typedef NoiseModelFactor2 This; - -public: - - /** - * Default Constructor for I/O - */ - NoiseModelFactor2() {} + /// @name Convenience method overloads + /// @{ - /** - * Constructor - * @param noiseModel shared pointer to noise model - * @param j1 key of the first variable - * @param j2 key of the second variable + /** No-Jacobians requested function overload. + * This specializes the version below to avoid recursive calls since this is + * commonly used. + * + * e.g. `const Vector error = factor.evaluateError(pose, point);` */ - NoiseModelFactor2(const SharedNoiseModel& noiseModel, Key j1, Key j2) : - Base(noiseModel, cref_list_of<2>(j1)(j2)) {} - - ~NoiseModelFactor2() override {} - - /** methods to retrieve both keys */ - inline Key key1() const { return keys_[0]; } - inline Key key2() const { return keys_[1]; } - - /** Calls the 2-key specific version of evaluateError, which is pure virtual - * so must be implemented in the derived class. */ - Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { - if(this->active(x)) { - const X1& x1 = x.at(keys_[0]); - const X2& x2 = x.at(keys_[1]); - if(H) { - return evaluateError(x1, x2, (*H)[0], (*H)[1]); - } else { - return evaluateError(x1, x2); - } - } else { - return Vector::Zero(this->dim()); - } + inline Vector evaluateError(const ValueTypes&... x) const { + return evaluateError(x..., OptionalMatrix()...); } - /** - * Override this method to finish implementing a binary factor. - * If any of the optional Matrix reference arguments are specified, it should compute - * both the function evaluation and its derivative(s) in X1 (and/or X2). 
- */ - virtual Vector - evaluateError(const X1&, const X2&, boost::optional H1 = - boost::none, boost::optional H2 = boost::none) const = 0; - -private: - - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & boost::serialization::make_nvp("NoiseModelFactor", - boost::serialization::base_object(*this)); + /** Some (but not all) optional Jacobians are omitted (function overload) + * + * e.g. `const Vector error = factor.evaluateError(pose, point, Hpose);` + */ + template > + inline Vector evaluateError(const ValueTypes&... x, + OptionalJacArgs&&... H) const { + return evaluateError(x..., std::forward(H)..., + boost::none); } -}; // \class NoiseModelFactor2 - -/* ************************************************************************* */ -/** A convenient base class for creating your own NoiseModelFactor with 3 - * variables. To derive from this class, implement evaluateError(). */ -template -class NoiseModelFactor3: public NoiseModelFactor { -public: - - // typedefs for value types pulled from keys - typedef VALUE1 X1; - typedef VALUE2 X2; - typedef VALUE3 X3; - -protected: - - typedef NoiseModelFactor Base; - typedef NoiseModelFactor3 This; - -public: - - /** - * Default Constructor for I/O - */ - NoiseModelFactor3() {} + /// @} - /** - * Constructor - * @param noiseModel shared pointer to noise model - * @param j1 key of the first variable - * @param j2 key of the second variable - * @param j3 key of the third variable - */ - NoiseModelFactor3(const SharedNoiseModel& noiseModel, Key j1, Key j2, Key j3) : - Base(noiseModel, cref_list_of<3>(j1)(j2)(j3)) {} - - ~NoiseModelFactor3() override {} - - /** methods to retrieve keys */ - inline Key key1() const { return keys_[0]; } - inline Key key2() const { return keys_[1]; } - inline Key key3() const { return keys_[2]; } - - /** Calls the 3-key specific version of evaluateError, which is pure virtual - * so must be 
implemented in the derived class. */ - Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { - if(this->active(x)) { - if(H) - return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), (*H)[0], (*H)[1], (*H)[2]); - else - return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2])); + private: + /** Pack expansion with index_sequence template pattern, used to index into + * `keys_` and `H`. + * + * Example: For `NoiseModelFactorN`, the call would look like: + * `const Vector error = unwhitenedError(0, 1, values, H);` + */ + template + inline Vector unwhitenedError( + boost::mp11::index_sequence, // + const Values& x, + boost::optional&> H = boost::none) const { + if (this->active(x)) { + if (H) { + return evaluateError(x.at(keys_[Indices])..., + (*H)[Indices]...); + } else { + return evaluateError(x.at(keys_[Indices])...); + } } else { return Vector::Zero(this->dim()); } } - /** - * Override this method to finish implementing a trinary factor. - * If any of the optional Matrix reference arguments are specified, it should compute - * both the function evaluation and its derivative(s) in X1 (and/or X2, X3). 
- */ - virtual Vector - evaluateError(const X1&, const X2&, const X3&, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const = 0; - -private: - /** Serialization function */ friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & boost::serialization::make_nvp("NoiseModelFactor", - boost::serialization::base_object(*this)); + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "NoiseModelFactor", boost::serialization::base_object(*this)); } -}; // \class NoiseModelFactor3 - -/* ************************************************************************* */ -/** A convenient base class for creating your own NoiseModelFactor with 4 - * variables. To derive from this class, implement evaluateError(). */ -template -class NoiseModelFactor4: public NoiseModelFactor { -public: - - // typedefs for value types pulled from keys - typedef VALUE1 X1; - typedef VALUE2 X2; - typedef VALUE3 X3; - typedef VALUE4 X4; - -protected: - - typedef NoiseModelFactor Base; - typedef NoiseModelFactor4 This; - -public: - - /** - * Default Constructor for I/O - */ - NoiseModelFactor4() {} + public: + /// @name Shortcut functions `key1()` -> `key<1>()` + /// @{ - /** - * Constructor - * @param noiseModel shared pointer to noise model - * @param j1 key of the first variable - * @param j2 key of the second variable - * @param j3 key of the third variable - * @param j4 key of the fourth variable - */ - NoiseModelFactor4(const SharedNoiseModel& noiseModel, Key j1, Key j2, Key j3, Key j4) : - Base(noiseModel, cref_list_of<4>(j1)(j2)(j3)(j4)) {} - - ~NoiseModelFactor4() override {} - - /** methods to retrieve keys */ - inline Key key1() const { return keys_[0]; } - inline Key key2() const { return keys_[1]; } - inline Key key3() const { return keys_[2]; } - inline Key key4() const { return keys_[3]; } - - /** Calls 
the 4-key specific version of evaluateError, which is pure virtual - * so must be implemented in the derived class. */ - Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { - if(this->active(x)) { - if(H) - return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), x.at(keys_[3]), (*H)[0], (*H)[1], (*H)[2], (*H)[3]); - else - return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), x.at(keys_[3])); - } else { - return Vector::Zero(this->dim()); - } + inline Key key1() const { + return key<1>(); } - - /** - * Override this method to finish implementing a 4-way factor. - * If any of the optional Matrix reference arguments are specified, it should compute - * both the function evaluation and its derivative(s) in X1 (and/or X2, X3). - */ - virtual Vector - evaluateError(const X1&, const X2&, const X3&, const X4&, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none, - boost::optional H3 = boost::none, - boost::optional H4 = boost::none) const = 0; - -private: - - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & boost::serialization::make_nvp("NoiseModelFactor", - boost::serialization::base_object(*this)); + template + inline Key key2() const { + static_assert(I <= N, "Index out of bounds"); + return key<2>(); } -}; // \class NoiseModelFactor4 - -/* ************************************************************************* */ -/** A convenient base class for creating your own NoiseModelFactor with 5 - * variables. To derive from this class, implement evaluateError(). 
*/ -template -class NoiseModelFactor5: public NoiseModelFactor { - -public: - - // typedefs for value types pulled from keys - typedef VALUE1 X1; - typedef VALUE2 X2; - typedef VALUE3 X3; - typedef VALUE4 X4; - typedef VALUE5 X5; - -protected: - - typedef NoiseModelFactor Base; - typedef NoiseModelFactor5 This; - -public: - - /** - * Default Constructor for I/O - */ - NoiseModelFactor5() {} - - /** - * Constructor - * @param noiseModel shared pointer to noise model - * @param j1 key of the first variable - * @param j2 key of the second variable - * @param j3 key of the third variable - * @param j4 key of the fourth variable - * @param j5 key of the fifth variable - */ - NoiseModelFactor5(const SharedNoiseModel& noiseModel, Key j1, Key j2, Key j3, Key j4, Key j5) : - Base(noiseModel, cref_list_of<5>(j1)(j2)(j3)(j4)(j5)) {} - - ~NoiseModelFactor5() override {} - - /** methods to retrieve keys */ - inline Key key1() const { return keys_[0]; } - inline Key key2() const { return keys_[1]; } - inline Key key3() const { return keys_[2]; } - inline Key key4() const { return keys_[3]; } - inline Key key5() const { return keys_[4]; } - - /** Calls the 5-key specific version of evaluateError, which is pure virtual - * so must be implemented in the derived class. */ - Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { - if(this->active(x)) { - if(H) - return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), x.at(keys_[3]), x.at(keys_[4]), (*H)[0], (*H)[1], (*H)[2], (*H)[3], (*H)[4]); - else - return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), x.at(keys_[3]), x.at(keys_[4])); - } else { - return Vector::Zero(this->dim()); - } + template + inline Key key3() const { + static_assert(I <= N, "Index out of bounds"); + return key<3>(); } - - /** - * Override this method to finish implementing a 5-way factor. 
- * If any of the optional Matrix reference arguments are specified, it should compute - * both the function evaluation and its derivative(s) in X1 (and/or X2, X3). - */ - virtual Vector - evaluateError(const X1&, const X2&, const X3&, const X4&, const X5&, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none, - boost::optional H3 = boost::none, - boost::optional H4 = boost::none, - boost::optional H5 = boost::none) const = 0; - -private: - - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & boost::serialization::make_nvp("NoiseModelFactor", - boost::serialization::base_object(*this)); + template + inline Key key4() const { + static_assert(I <= N, "Index out of bounds"); + return key<4>(); } -}; // \class NoiseModelFactor5 - -/* ************************************************************************* */ -/** A convenient base class for creating your own NoiseModelFactor with 6 - * variables. To derive from this class, implement evaluateError(). 
*/ -template -class NoiseModelFactor6: public NoiseModelFactor { - -public: - - // typedefs for value types pulled from keys - typedef VALUE1 X1; - typedef VALUE2 X2; - typedef VALUE3 X3; - typedef VALUE4 X4; - typedef VALUE5 X5; - typedef VALUE6 X6; - -protected: - - typedef NoiseModelFactor Base; - typedef NoiseModelFactor6 This; - -public: - - /** - * Default Constructor for I/O - */ - NoiseModelFactor6() {} - - /** - * Constructor - * @param noiseModel shared pointer to noise model - * @param j1 key of the first variable - * @param j2 key of the second variable - * @param j3 key of the third variable - * @param j4 key of the fourth variable - * @param j5 key of the fifth variable - * @param j6 key of the fifth variable - */ - NoiseModelFactor6(const SharedNoiseModel& noiseModel, Key j1, Key j2, Key j3, Key j4, Key j5, Key j6) : - Base(noiseModel, cref_list_of<6>(j1)(j2)(j3)(j4)(j5)(j6)) {} - - ~NoiseModelFactor6() override {} - - /** methods to retrieve keys */ - inline Key key1() const { return keys_[0]; } - inline Key key2() const { return keys_[1]; } - inline Key key3() const { return keys_[2]; } - inline Key key4() const { return keys_[3]; } - inline Key key5() const { return keys_[4]; } - inline Key key6() const { return keys_[5]; } - - /** Calls the 6-key specific version of evaluateError, which is pure virtual - * so must be implemented in the derived class. 
*/ - Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { - if(this->active(x)) { - if(H) - return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), x.at(keys_[3]), x.at(keys_[4]), x.at(keys_[5]), (*H)[0], (*H)[1], (*H)[2], (*H)[3], (*H)[4], (*H)[5]); - else - return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), x.at(keys_[3]), x.at(keys_[4]), x.at(keys_[5])); - } else { - return Vector::Zero(this->dim()); - } + template + inline Key key5() const { + static_assert(I <= N, "Index out of bounds"); + return key<5>(); + } + template + inline Key key6() const { + static_assert(I <= N, "Index out of bounds"); + return key<6>(); } - /** - * Override this method to finish implementing a 6-way factor. - * If any of the optional Matrix reference arguments are specified, it should compute - * both the function evaluation and its derivative(s) in X1 (and/or X2, X3). - */ - virtual Vector - evaluateError(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none, - boost::optional H3 = boost::none, - boost::optional H4 = boost::none, - boost::optional H5 = boost::none, - boost::optional H6 = boost::none) const = 0; - -private: + /// @} - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & boost::serialization::make_nvp("NoiseModelFactor", - boost::serialization::base_object(*this)); - } -}; // \class NoiseModelFactor6 +}; // \class NoiseModelFactorN -/* ************************************************************************* */ +#define NoiseModelFactor1 NoiseModelFactorN +#define NoiseModelFactor2 NoiseModelFactorN +#define NoiseModelFactor3 NoiseModelFactorN +#define NoiseModelFactor4 NoiseModelFactorN +#define NoiseModelFactor5 NoiseModelFactorN +#define NoiseModelFactor6 NoiseModelFactorN -} // \namespace gtsam +} // namespace 
gtsam diff --git a/gtsam/nonlinear/NonlinearFactorGraph.cpp b/gtsam/nonlinear/NonlinearFactorGraph.cpp index dfa54f26f0..84c15c0042 100644 --- a/gtsam/nonlinear/NonlinearFactorGraph.cpp +++ b/gtsam/nonlinear/NonlinearFactorGraph.cpp @@ -285,8 +285,8 @@ static Scatter scatterFromValues(const Values& values) { scatter.reserve(values.size()); // use "natural" ordering with keys taken from the initial values - for (const auto key_value : values) { - scatter.add(key_value.key, key_value.value.dim()); + for (const auto& key_dim : values.dims()) { + scatter.add(key_dim.first, key_dim.second); } return scatter; diff --git a/gtsam/nonlinear/PriorFactor.h b/gtsam/nonlinear/PriorFactor.h index 2d4a0ca326..d81ffbd319 100644 --- a/gtsam/nonlinear/PriorFactor.h +++ b/gtsam/nonlinear/PriorFactor.h @@ -27,14 +27,14 @@ namespace gtsam { * @ingroup nonlinear */ template - class PriorFactor: public NoiseModelFactor1 { + class PriorFactor: public NoiseModelFactorN { public: typedef VALUE T; private: - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; VALUE prior_; /** The measurement */ @@ -105,6 +105,7 @@ namespace gtsam { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(prior_); diff --git a/gtsam/nonlinear/Values-inl.h b/gtsam/nonlinear/Values-inl.h index 0370c5ceea..04c6440a2b 100644 --- a/gtsam/nonlinear/Values-inl.h +++ b/gtsam/nonlinear/Values-inl.h @@ -25,10 +25,8 @@ #pragma once #include - #include - -#include // Only so Eclipse finds class definition +#include namespace gtsam { @@ -90,7 +88,27 @@ namespace gtsam { } }; - /* ************************************************************************* */ +/* ************************************************************************* */ + template 
+ std::map + Values::extract(const std::function& filterFcn) const { + std::map result; + for (const auto& key_value : values_) { + // Check if key matches + if (filterFcn(key_value.first)) { + // Check if type matches (typically does as symbols matched with types) + if (auto t = + dynamic_cast*>(key_value.second)) + result[key_value.first] = t->value(); + } + } + return result; + } + +/* ************************************************************************* */ +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 +#include + template class Values::Filtered { public: @@ -145,14 +163,14 @@ namespace gtsam { &ValuesCastHelper::cast)), constBegin_( boost::make_transform_iterator( boost::make_filter_iterator(filter, - ((const Values&) values).begin(), - ((const Values&) values).end()), + values._begin(), + values._end()), &ValuesCastHelper::cast)), constEnd_( boost::make_transform_iterator( boost::make_filter_iterator(filter, - ((const Values&) values).end(), - ((const Values&) values).end()), + values._end(), + values._end()), &ValuesCastHelper::cast)) { } @@ -164,7 +182,6 @@ namespace gtsam { const_const_iterator constEnd_; }; - /* ************************************************************************* */ template class Values::ConstFiltered { public: @@ -215,8 +232,6 @@ namespace gtsam { } }; - /* ************************************************************************* */ - /** Constructor from a Filtered view copies out all values */ template Values::Values(const Values::Filtered& view) { for(const auto key_value: view) { @@ -225,7 +240,6 @@ namespace gtsam { } } - /* ************************************************************************* */ template Values::Values(const Values::ConstFiltered& view) { for(const auto key_value: view) { @@ -234,13 +248,11 @@ namespace gtsam { } } - /* ************************************************************************* */ Values::Filtered inline Values::filter(const std::function& filterFcn) { return filter(filterFcn); } - /* 
************************************************************************* */ template Values::Filtered Values::filter(const std::function& filterFcn) { @@ -248,19 +260,18 @@ namespace gtsam { std::placeholders::_1), *this); } - /* ************************************************************************* */ Values::ConstFiltered inline Values::filter(const std::function& filterFcn) const { return filter(filterFcn); } - /* ************************************************************************* */ template Values::ConstFiltered Values::filter(const std::function& filterFcn) const { return ConstFiltered(std::bind(&filterHelper, filterFcn, std::placeholders::_1), *this); } +#endif /* ************************************************************************* */ template<> diff --git a/gtsam/nonlinear/Values.cpp b/gtsam/nonlinear/Values.cpp index adadc99c06..c7321c904d 100644 --- a/gtsam/nonlinear/Values.cpp +++ b/gtsam/nonlinear/Values.cpp @@ -25,8 +25,6 @@ #include #include -#include - #include #include #include @@ -52,15 +50,15 @@ namespace gtsam { /* ************************************************************************* */ Values::Values(const Values& other, const VectorValues& delta) { - for (const_iterator key_value = other.begin(); key_value != other.end(); ++key_value) { - VectorValues::const_iterator it = delta.find(key_value->key); - Key key = key_value->key; // Non-const duplicate to deal with non-const insert argument + for (const auto& key_value : other.values_) { + VectorValues::const_iterator it = delta.find(key_value.first); + Key key = key_value.first; // Non-const duplicate to deal with non-const insert argument if (it != delta.end()) { const Vector& v = it->second; - Value* retractedValue(key_value->value.retract_(v)); // Retract + Value* retractedValue(key_value.second->retract_(v)); // Retract values_.insert(key, retractedValue); // Add retracted result directly to result values } else { - values_.insert(key, key_value->value.clone_()); // Add 
original version to result values + values_.insert(key, key_value.second->clone_()); // Add original version to result values } } } @@ -69,9 +67,9 @@ namespace gtsam { void Values::print(const string& str, const KeyFormatter& keyFormatter) const { cout << str << (str.empty() ? "" : "\n"); cout << "Values with " << size() << " values:\n"; - for(const_iterator key_value = begin(); key_value != end(); ++key_value) { - cout << "Value " << keyFormatter(key_value->key) << ": "; - key_value->value.print(""); + for (const auto& key_value : values_) { + cout << "Value " << keyFormatter(key_value.first) << ": "; + key_value.second->print(""); cout << "\n"; } } @@ -80,12 +78,12 @@ namespace gtsam { bool Values::equals(const Values& other, double tol) const { if (this->size() != other.size()) return false; - for (const_iterator it1 = this->begin(), it2 = other.begin(); - it1 != this->end(); ++it1, ++it2) { - const Value& value1 = it1->value; - const Value& value2 = it2->value; - if (typeid(value1) != typeid(value2) || it1->key != it2->key - || !value1.equals_(value2, tol)) { + for (auto it1 = values_.begin(), it2 = other.values_.begin(); + it1 != values_.end(); ++it1, ++it2) { + const Value* value1 = it1->second; + const Value* value2 = it2->second; + if (typeid(*value1) != typeid(*value2) || it1->first != it2->first + || !value1->equals_(*value2, tol)) { return false; } } @@ -102,17 +100,44 @@ namespace gtsam { return Values(*this, delta); } + /* ************************************************************************* */ + void Values::retractMasked(const VectorValues& delta, const KeySet& mask) { + gttic(retractMasked); + assert(this->size() == delta.size()); + auto key_value = values_.begin(); + VectorValues::const_iterator key_delta; +#ifdef GTSAM_USE_TBB + for (; key_value != values_.end(); ++key_value) { + key_delta = delta.find(key_value->first); +#else + for (key_delta = delta.begin(); key_value != values_.end(); + ++key_value, ++key_delta) { + assert(key_value->first 
== key_delta->first); +#endif + Key var = key_value->first; + assert(static_cast(delta[var].size()) == key_value->second->dim()); + assert(delta[var].allFinite()); + if (mask.exists(var)) { + Value* retracted = key_value->second->retract_(delta[var]); + // TODO(dellaert): can we use std::move here? + *(key_value->second) = *retracted; + retracted->deallocate_(); + } + } + } + /* ************************************************************************* */ VectorValues Values::localCoordinates(const Values& cp) const { if(this->size() != cp.size()) throw DynamicValuesMismatched(); VectorValues result; - for(const_iterator it1=this->begin(), it2=cp.begin(); it1!=this->end(); ++it1, ++it2) { - if(it1->key != it2->key) + for (auto it1 = values_.begin(), it2 = cp.values_.begin(); + it1 != values_.end(); ++it1, ++it2) { + if(it1->first != it2->first) throw DynamicValuesMismatched(); // If keys do not match // Will throw a dynamic_cast exception if types do not match // NOTE: this is separate from localCoordinates(cp, ordering, result) due to at() vs. 
insert - result.insert(it1->key, it1->value.localCoordinates_(it2->value)); + result.insert(it1->first, it1->second->localCoordinates_(*it2->second)); } return result; } @@ -130,24 +155,26 @@ namespace gtsam { /* ************************************************************************* */ void Values::insert(Key j, const Value& val) { - std::pair insertResult = tryInsert(j, val); + auto insertResult = values_.insert(j, val.clone_()); if(!insertResult.second) throw ValuesKeyAlreadyExists(j); } /* ************************************************************************* */ - void Values::insert(const Values& values) { - for(const_iterator key_value = values.begin(); key_value != values.end(); ++key_value) { - Key key = key_value->key; // Non-const duplicate to deal with non-const insert argument - insert(key, key_value->value); + void Values::insert(const Values& other) { + for (auto key_value = other.values_.begin(); + key_value != other.values_.end(); ++key_value) { + insert(key_value->first, *(key_value->second)); } } /* ************************************************************************* */ +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 std::pair Values::tryInsert(Key j, const Value& value) { std::pair result = values_.insert(j, value.clone_()); return std::make_pair(boost::make_transform_iterator(result.first, &make_deref_pair), result.second); } +#endif /* ************************************************************************* */ void Values::update(Key j, const Value& val) { @@ -165,9 +192,10 @@ namespace gtsam { } /* ************************************************************************* */ - void Values::update(const Values& values) { - for(const_iterator key_value = values.begin(); key_value != values.end(); ++key_value) { - this->update(key_value->key, key_value->value); + void Values::update(const Values& other) { + for (auto key_value = other.values_.begin(); + key_value != other.values_.end(); ++key_value) { + this->update(key_value->first, 
*(key_value->second)); } } @@ -183,10 +211,10 @@ namespace gtsam { } /* ************************************************************************ */ - void Values::insert_or_assign(const Values& values) { - for (const_iterator key_value = values.begin(); key_value != values.end(); - ++key_value) { - this->insert_or_assign(key_value->key, key_value->value); + void Values::insert_or_assign(const Values& other) { + for (auto key_value = other.values_.begin(); + key_value != other.values_.end(); ++key_value) { + this->insert_or_assign(key_value->first, *(key_value->second)); } } @@ -202,8 +230,16 @@ namespace gtsam { KeyVector Values::keys() const { KeyVector result; result.reserve(size()); - for(const_iterator key_value = begin(); key_value != end(); ++key_value) - result.push_back(key_value->key); + for(const auto& key_value: values_) + result.push_back(key_value.first); + return result; + } + + /* ************************************************************************* */ + KeySet Values::keySet() const { + KeySet result; + for(const auto& key_value: values_) + result.insert(key_value.first); return result; } @@ -217,8 +253,17 @@ namespace gtsam { /* ************************************************************************* */ size_t Values::dim() const { size_t result = 0; - for(const auto key_value: *this) { - result += key_value.value.dim(); + for (const auto key_value : values_) { + result += key_value->second->dim(); + } + return result; + } + + /* ************************************************************************* */ + std::map Values::dims() const { + std::map result; + for (const auto key_value : values_) { + result.emplace(key_value->first, key_value->second->dim()); } return result; } @@ -226,8 +271,8 @@ namespace gtsam { /* ************************************************************************* */ VectorValues Values::zeroVectors() const { VectorValues result; - for(const auto key_value: *this) - result.insert(key_value.key, 
Vector::Zero(key_value.value.dim())); + for (const auto key_value : values_) + result.insert(key_value->first, Vector::Zero(key_value->second->dim())); return result; } diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index cfe6347b50..74f22a27df 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -28,10 +28,12 @@ #include #include #include -#include -#include #include #include +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 +#include +#include +#endif #include #include @@ -79,10 +81,6 @@ namespace gtsam { // The member to store the values, see just above KeyValueMap values_; - // Types obtained by iterating - typedef KeyValueMap::const_iterator::value_type ConstKeyValuePtrPair; - typedef KeyValueMap::iterator::value_type KeyValuePtrPair; - public: /// A shared_ptr to this class @@ -108,34 +106,13 @@ namespace gtsam { ConstKeyValuePair(const KeyValuePair& kv) : key(kv.key), value(kv.value) {} }; - /// Mutable forward iterator, with value type KeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::iterator> iterator; - - /// Const forward iterator, with value type ConstKeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::const_iterator> const_iterator; - - /// Mutable reverse iterator, with value type KeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::reverse_iterator> reverse_iterator; - - /// Const reverse iterator, with value type ConstKeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::const_reverse_iterator> const_reverse_iterator; - typedef KeyValuePair value_type; - /** A filtered view of a Values, returned from Values::filter. */ - template - class Filtered; - - /** A filtered view of a const Values, returned from Values::filter. 
*/ - template - class ConstFiltered; + /// @name Constructors + /// @{ /** Default constructor creates an empty Values class */ - Values() {} + Values() = default; /** Copy constructor duplicates all keys and values */ Values(const Values& other); @@ -153,14 +130,7 @@ namespace gtsam { /** Construct from a Values and an update vector: identical to other.retract(delta) */ Values(const Values& other, const VectorValues& delta); - /** Constructor from a Filtered view copies out all values */ - template - Values(const Filtered& view); - - /** Constructor from a Filtered or ConstFiltered view */ - template - Values(const ConstFiltered& view); - + /// @} /// @name Testable /// @{ @@ -171,6 +141,8 @@ namespace gtsam { bool equals(const Values& other, double tol=1e-9) const; /// @} + /// @name Standard Interface + /// @{ /** Retrieve a variable by key \c j. The type of the value associated with * this key is supplied as a template argument to this function. @@ -205,47 +177,60 @@ namespace gtsam { template boost::optional exists(Key j) const; - /** Find an element by key, returning an iterator, or end() if the key was - * not found. */ - iterator find(Key j) { return boost::make_transform_iterator(values_.find(j), &make_deref_pair); } - - /** Find an element by key, returning an iterator, or end() if the key was - * not found. */ - const_iterator find(Key j) const { return boost::make_transform_iterator(values_.find(j), &make_const_deref_pair); } - - /** Find the element greater than or equal to the specified key. */ - iterator lower_bound(Key j) { return boost::make_transform_iterator(values_.lower_bound(j), &make_deref_pair); } - - /** Find the element greater than or equal to the specified key. */ - const_iterator lower_bound(Key j) const { return boost::make_transform_iterator(values_.lower_bound(j), &make_const_deref_pair); } - - /** Find the lowest-ordered element greater than the specified key. 
*/ - iterator upper_bound(Key j) { return boost::make_transform_iterator(values_.upper_bound(j), &make_deref_pair); } - - /** Find the lowest-ordered element greater than the specified key. */ - const_iterator upper_bound(Key j) const { return boost::make_transform_iterator(values_.upper_bound(j), &make_const_deref_pair); } - /** The number of variables in this config */ size_t size() const { return values_.size(); } /** whether the config is empty */ bool empty() const { return values_.empty(); } - const_iterator begin() const { return boost::make_transform_iterator(values_.begin(), &make_const_deref_pair); } - const_iterator end() const { return boost::make_transform_iterator(values_.end(), &make_const_deref_pair); } - iterator begin() { return boost::make_transform_iterator(values_.begin(), &make_deref_pair); } - iterator end() { return boost::make_transform_iterator(values_.end(), &make_deref_pair); } - const_reverse_iterator rbegin() const { return boost::make_transform_iterator(values_.rbegin(), &make_const_deref_pair); } - const_reverse_iterator rend() const { return boost::make_transform_iterator(values_.rend(), &make_const_deref_pair); } - reverse_iterator rbegin() { return boost::make_transform_iterator(values_.rbegin(), &make_deref_pair); } - reverse_iterator rend() { return boost::make_transform_iterator(values_.rend(), &make_deref_pair); } + /// @} + /// @name Iterator + /// @{ + + struct deref_iterator { + using const_iterator_type = typename KeyValueMap::const_iterator; + const_iterator_type it_; + deref_iterator(const_iterator_type it) : it_(it) {} + ConstKeyValuePair operator*() const { return {it_->first, *(it_->second)}; } + boost::shared_ptr operator->() { + return boost::make_shared(it_->first, *(it_->second)); + } + bool operator==(const deref_iterator& other) const { + return it_ == other.it_; + } + bool operator!=(const deref_iterator& other) const { return it_ != other.it_; } + deref_iterator& operator++() { + ++it_; + return *this; + } + 
}; + + deref_iterator begin() const { return deref_iterator(values_.begin()); } + deref_iterator end() const { return deref_iterator(values_.end()); } + + /** Find an element by key, returning an iterator, or end() if the key was + * not found. */ + deref_iterator find(Key j) const { return deref_iterator(values_.find(j)); } + + /** Find the element greater than or equal to the specified key. */ + deref_iterator lower_bound(Key j) const { return deref_iterator(values_.lower_bound(j)); } + + /** Find the lowest-ordered element greater than the specified key. */ + deref_iterator upper_bound(Key j) const { return deref_iterator(values_.upper_bound(j)); } + /// @} /// @name Manifold Operations /// @{ /** Add a delta config to current config and returns a new config */ Values retract(const VectorValues& delta) const; + /** + * Retract, but only for Keys appearing in \c mask. In-place. + * \param mask Mask on Keys where to apply retract. + */ + void retractMasked(const VectorValues& delta, const KeySet& mask); + /** Get a delta config about a linearization point c0 (*this) */ VectorValues localCoordinates(const Values& cp) const; @@ -266,12 +251,6 @@ namespace gtsam { /// version for double void insertDouble(Key j, double c) { insert(j,c); } - /** insert that mimics the STL map insert - if the value already exists, the map is not modified - * and an iterator to the existing value is returned, along with 'false'. If the value did not - * exist, it is inserted and an iterator pointing to the new element, along with 'true', is - * returned. */ - std::pair tryInsert(Key j, const Value& value); - /** single element change of existing element */ void update(Key j, const Value& val); @@ -302,11 +281,16 @@ namespace gtsam { void erase(Key j); /** - * Returns a set of keys in the config + * Returns a vector of keys in the config. * Note: by construction, the list is ordered */ KeyVector keys() const; + /** + * Returns a set of keys in the config. 
+ */ + KeySet keySet() const; + /** Replace all keys and variables */ Values& operator=(const Values& rhs); @@ -319,33 +303,32 @@ namespace gtsam { /** Compute the total dimensionality of all values (\f$ O(n) \f$) */ size_t dim() const; + /** Return all dimensions in a map (\f$ O(n log n) \f$) */ + std::map dims() const; + /** Return a VectorValues of zero vectors for each variable in this Values */ VectorValues zeroVectors() const; - /** - * Return a filtered view of this Values class, without copying any data. - * When iterating over the filtered view, only the key-value pairs - * with a key causing \c filterFcn to return \c true are visited. Because - * the object Filtered returned from filter() is only a - * view the original Values object must not be deallocated or - * go out of scope as long as the view is needed. - * @param filterFcn The function that determines which key-value pairs are - * included in the filtered view, for which this function returns \c true - * on their keys. - * @return A filtered view of the original Values class, which references - * the original Values class. - */ - Filtered - filter(const std::function& filterFcn); + // Count values of given type \c ValueType + template + size_t count() const { + size_t i = 0; + for (const auto key_value : values_) { + if (dynamic_cast*>(key_value.second)) + ++i; + } + return i; + } /** - * Return a filtered view of this Values class, without copying any data. + * Extract a subset of values of the given type \c ValueType. + * * In this templated version, only key-value pairs whose value matches the * template argument \c ValueType and whose key causes the function argument * \c filterFcn to return true are visited when iterating over the filtered - * view. Because the object Filtered returned from filter() is only - * a view the original Values object must not be deallocated or - * go out of scope as long as the view is needed. + * view. 
This replaces the fancier but very boost-dependent \c filter methods + * that were previously available up to GTSAM 4.2. + * * @tparam ValueType The type that the value in a key-value pair must match * to be included in the filtered view. Currently, base classes are not * resolved so the type must match exactly, except if ValueType = Value, in @@ -354,61 +337,98 @@ namespace gtsam { * included in the filtered view, for which this function returns \c true * on their keys (default: always return true so that filter() only * filters by type, matching \c ValueType). - * @return A filtered view of the original Values class, which references - * the original Values class. + * @return An Eigen aligned map on Key with the filtered values. */ - template - Filtered + template + std::map // , std::less, Eigen::aligned_allocator + extract(const std::function& filterFcn = &_truePredicate) const; + +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 + // Types obtained by iterating + typedef KeyValueMap::const_iterator::value_type ConstKeyValuePtrPair; + typedef KeyValueMap::iterator::value_type KeyValuePtrPair; + + /// Mutable forward iterator, with value type KeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::iterator> iterator; + + /// Const forward iterator, with value type ConstKeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::const_iterator> const_iterator; + + /// Mutable reverse iterator, with value type KeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::reverse_iterator> reverse_iterator; + + /// Const reverse iterator, with value type ConstKeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::const_reverse_iterator> const_reverse_iterator; + + /** insert that mimics the STL map insert - if the value already exists, the map is not modified + * and an iterator to the existing value is returned, along with 'false'. 
If the value did not + * exist, it is inserted and an iterator pointing to the new element, along with 'true', is + * returned. */ + std::pair tryInsert(Key j, const Value& value); + + static ConstKeyValuePair make_const_deref_pair(const KeyValueMap::const_iterator::value_type& key_value) { + return ConstKeyValuePair(key_value.first, *key_value.second); } + + static KeyValuePair make_deref_pair(const KeyValueMap::iterator::value_type& key_value) { + return KeyValuePair(key_value.first, *key_value.second); } + + const_iterator _begin() const { return boost::make_transform_iterator(values_.begin(), &make_const_deref_pair); } + const_iterator _end() const { return boost::make_transform_iterator(values_.end(), &make_const_deref_pair); } + iterator begin() { return boost::make_transform_iterator(values_.begin(), &make_deref_pair); } + iterator end() { return boost::make_transform_iterator(values_.end(), &make_deref_pair); } + const_reverse_iterator rbegin() const { return boost::make_transform_iterator(values_.rbegin(), &make_const_deref_pair); } + const_reverse_iterator rend() const { return boost::make_transform_iterator(values_.rend(), &make_const_deref_pair); } + reverse_iterator rbegin() { return boost::make_transform_iterator(values_.rbegin(), &make_deref_pair); } + reverse_iterator rend() { return boost::make_transform_iterator(values_.rend(), &make_deref_pair); } + + /** Find an element by key, returning an iterator, or end() if the key was + * not found. */ + iterator find(Key j) { return boost::make_transform_iterator(values_.find(j), &make_deref_pair); } + + /** Find the element greater than or equal to the specified key. */ + iterator lower_bound(Key j) { return boost::make_transform_iterator(values_.lower_bound(j), &make_deref_pair); } + + /** Find the lowest-ordered element greater than the specified key. 
*/ + iterator upper_bound(Key j) { return boost::make_transform_iterator(values_.upper_bound(j), &make_deref_pair); } + + /** A filtered view of a Values, returned from Values::filter. */ + template + class Filtered; + + /** A filtered view of a const Values, returned from Values::filter. */ + template + class ConstFiltered; + + /** Constructor from a Filtered view copies out all values */ + template + Values(const Filtered& view); + + /** Constructor from a Filtered or ConstFiltered view */ + template + Values(const ConstFiltered& view); + + /// A filtered view of the original Values class. + Filtered GTSAM_DEPRECATED + filter(const std::function& filterFcn); + + /// A filtered view of the original Values class, also filter on type. + template + Filtered GTSAM_DEPRECATED filter(const std::function& filterFcn = &_truePredicate); - /** - * Return a filtered view of this Values class, without copying any data. - * When iterating over the filtered view, only the key-value pairs - * with a key causing \c filterFcn to return \c true are visited. Because - * the object Filtered returned from filter() is only a - * view the original Values object must not be deallocated or - * go out of scope as long as the view is needed. - * @param filterFcn The function that determines which key-value pairs are - * included in the filtered view, for which this function returns \c true - * on their keys. - * @return A filtered view of the original Values class, which references - * the original Values class. - */ - ConstFiltered + /// A filtered view of the original Values class, const version. + ConstFiltered GTSAM_DEPRECATED filter(const std::function& filterFcn) const; - /** - * Return a filtered view of this Values class, without copying any data. 
- * In this templated version, only key-value pairs whose value matches the - * template argument \c ValueType and whose key causes the function argument - * \c filterFcn to return true are visited when iterating over the filtered - * view. Because the object Filtered returned from filter() is only - * a view the original Values object must not be deallocated or - * go out of scope as long as the view is needed. - * @tparam ValueType The type that the value in a key-value pair must match - * to be included in the filtered view. Currently, base classes are not - * resolved so the type must match exactly, except if ValueType = Value, in - * which case no type filtering is done. - * @param filterFcn The function that determines which key-value pairs are - * included in the filtered view, for which this function returns \c true - * on their keys. - * @return A filtered view of the original Values class, which references - * the original Values class. - */ - template - ConstFiltered - filter(const std::function& filterFcn = &_truePredicate) const; - - // Count values of given type \c ValueType - template - size_t count() const { - size_t i = 0; - for (const auto key_value : *this) { - if (dynamic_cast*>(&key_value.value)) - ++i; - } - return i; - } + /// A filtered view of the original Values class, also on type, const. 
+ template + ConstFiltered GTSAM_DEPRECATED filter( + const std::function& filterFcn = &_truePredicate) const; +#endif private: // Filters based on ValueType (if not Value) and also based on the user- @@ -427,12 +447,6 @@ namespace gtsam { ar & BOOST_SERIALIZATION_NVP(values_); } - static ConstKeyValuePair make_const_deref_pair(const KeyValueMap::const_iterator::value_type& key_value) { - return ConstKeyValuePair(key_value.first, *key_value.second); } - - static KeyValuePair make_deref_pair(const KeyValueMap::iterator::value_type& key_value) { - return KeyValuePair(key_value.first, *key_value.second); } - }; /* ************************************************************************* */ diff --git a/gtsam/nonlinear/internal/LevenbergMarquardtState.h b/gtsam/nonlinear/internal/LevenbergMarquardtState.h index 75e5a5135c..a055f3060f 100644 --- a/gtsam/nonlinear/internal/LevenbergMarquardtState.h +++ b/gtsam/nonlinear/internal/LevenbergMarquardtState.h @@ -128,9 +128,10 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState { noiseModelCache.resize(0); // for each of the variables, add a prior damped.reserve(damped.size() + values.size()); - for (const auto key_value : values) { - const Key key = key_value.key; - const size_t dim = key_value.value.dim(); + std::map dims = values.dims(); + for (const auto& key_dim : dims) { + const Key& key = key_dim.first; + const size_t& dim = key_dim.second; const CachedModel* item = getCachedModel(dim); damped += boost::make_shared(key, item->A, item->b, item->model); } diff --git a/gtsam/nonlinear/tests/priorFactor.xml b/gtsam/nonlinear/tests/priorFactor.xml new file mode 100644 index 0000000000..0c580fb211 --- /dev/null +++ b/gtsam/nonlinear/tests/priorFactor.xml @@ -0,0 +1,77 @@ + + + + + + + + + 1 + 0 + 12345 + + + + + + + + + 6 + + + 0 + + + + 6 + + 1.00000000000000000e+00 + 1.00000000000000000e+00 + 1.00000000000000000e+00 + 1.00000000000000000e+00 + 1.00000000000000000e+00 + 1.00000000000000000e+00 + + + + 6 + + 
1.00000000000000000e+00 + 1.00000000000000000e+00 + 1.00000000000000000e+00 + 1.00000000000000000e+00 + 1.00000000000000000e+00 + 1.00000000000000000e+00 + + + + 1.00000000000000000e+00 + 1.00000000000000000e+00 + + + + + + + + 4.11982245665682978e-01 + -8.33737651774156818e-01 + -3.67630462924899259e-01 + -5.87266449276209815e-02 + -4.26917621276207360e-01 + 9.02381585483330806e-01 + -9.09297426825681709e-01 + -3.50175488374014632e-01 + -2.24845095366152908e-01 + + + + 4.00000000000000000e+00 + 5.00000000000000000e+00 + 6.00000000000000000e+00 + + + + + diff --git a/gtsam/nonlinear/tests/testAdaptAutoDiff.cpp b/gtsam/nonlinear/tests/testAdaptAutoDiff.cpp index 1423b473e7..2deece2282 100644 --- a/gtsam/nonlinear/tests/testAdaptAutoDiff.cpp +++ b/gtsam/nonlinear/tests/testAdaptAutoDiff.cpp @@ -29,9 +29,6 @@ #include -#include -using boost::assign::list_of; -using boost::assign::map_list_of; namespace gtsam { @@ -253,8 +250,7 @@ TEST(AdaptAutoDiff, SnavelyExpression) { internal::upAligned(RecordSize) + P.traceSize() + X.traceSize(), expression.traceSize()); - set expected = list_of(1)(2); - + const set expected{1, 2}; EXPECT(expected == expression.keys()); } diff --git a/gtsam/nonlinear/tests/testExpression.cpp b/gtsam/nonlinear/tests/testExpression.cpp index 9987cca3f9..a93f8a0e13 100644 --- a/gtsam/nonlinear/tests/testExpression.cpp +++ b/gtsam/nonlinear/tests/testExpression.cpp @@ -25,10 +25,6 @@ #include -#include -using boost::assign::list_of; -using boost::assign::map_list_of; - using namespace std; using namespace gtsam; @@ -90,7 +86,7 @@ Vector f3(const Point3& p, OptionalJacobian H) { return p; } Point3_ pointExpression(1); -set expected = list_of(1); +const set expected{1}; } // namespace unary // Create a unary expression that takes another expression as a single argument. 
@@ -190,14 +186,14 @@ TEST(Expression, BinaryToDouble) { /* ************************************************************************* */ // Check keys of an expression created from class method. TEST(Expression, BinaryKeys) { - set expected = list_of(1)(2); + const set expected{1, 2}; EXPECT(expected == binary::p_cam.keys()); } /* ************************************************************************* */ // Check dimensions by calling `dims` method. TEST(Expression, BinaryDimensions) { - map actual, expected = map_list_of(1, 6)(2, 3); + map actual, expected{{1, 6}, {2, 3}}; binary::p_cam.dims(actual); EXPECT(actual == expected); } @@ -227,14 +223,14 @@ Expression uv_hat(uncalibrate, K, projection); /* ************************************************************************* */ // keys TEST(Expression, TreeKeys) { - set expected = list_of(1)(2)(3); + const set expected{1, 2, 3}; EXPECT(expected == tree::uv_hat.keys()); } /* ************************************************************************* */ // dimensions TEST(Expression, TreeDimensions) { - map actual, expected = map_list_of(1, 6)(2, 3)(3, 5); + map actual, expected{{1, 6}, {2, 3}, {3, 5}}; tree::uv_hat.dims(actual); EXPECT(actual == expected); } @@ -265,7 +261,7 @@ TEST(Expression, compose1) { Rot3_ R3 = R1 * R2; // Check keys - set expected = list_of(1)(2); + const set expected{1, 2}; EXPECT(expected == R3.keys()); } @@ -277,7 +273,7 @@ TEST(Expression, compose2) { Rot3_ R3 = R1 * R2; // Check keys - set expected = list_of(1); + const set expected{1}; EXPECT(expected == R3.keys()); } @@ -289,7 +285,7 @@ TEST(Expression, compose3) { Rot3_ R3 = R1 * R2; // Check keys - set expected = list_of(3); + const set expected{3}; EXPECT(expected == R3.keys()); } @@ -302,7 +298,7 @@ TEST(Expression, compose4) { Double_ R3 = R1 * R2; // Check keys - set expected = list_of(1); + const set expected{1}; EXPECT(expected == R3.keys()); } @@ -326,7 +322,7 @@ TEST(Expression, ternary) { Rot3_ ABC(composeThree, A, B, C); // 
Check keys - set expected = list_of(1)(2)(3); + const set expected {1, 2, 3}; EXPECT(expected == ABC.keys()); } @@ -336,10 +332,10 @@ TEST(Expression, ScalarMultiply) { const Key key(67); const Point3_ expr = 23 * Point3_(key); - set expected_keys = list_of(key); + const set expected_keys{key}; EXPECT(expected_keys == expr.keys()); - map actual_dims, expected_dims = map_list_of(key, 3); + map actual_dims, expected_dims {{key, 3}}; expr.dims(actual_dims); EXPECT(actual_dims == expected_dims); @@ -367,10 +363,10 @@ TEST(Expression, BinarySum) { const Key key(67); const Point3_ sum_ = Point3_(key) + Point3_(Point3(1, 1, 1)); - set expected_keys = list_of(key); + const set expected_keys{key}; EXPECT(expected_keys == sum_.keys()); - map actual_dims, expected_dims = map_list_of(key, 3); + map actual_dims, expected_dims {{key, 3}}; sum_.dims(actual_dims); EXPECT(actual_dims == expected_dims); @@ -462,7 +458,7 @@ TEST(Expression, UnaryOfSum) { const Point3_ sum_ = Point3_(key1) + Point3_(key2); const Double_ norm_(>sam::norm3, sum_); - map actual_dims, expected_dims = map_list_of(key1, 3)(key2, 3); + map actual_dims, expected_dims = {{key1, 3}, {key2, 3}}; norm_.dims(actual_dims); EXPECT(actual_dims == expected_dims); @@ -485,7 +481,7 @@ TEST(Expression, WeightedSum) { const Key key1(42), key2(67); const Point3_ weighted_sum_ = 17 * Point3_(key1) + 23 * Point3_(key2); - map actual_dims, expected_dims = map_list_of(key1, 3)(key2, 3); + map actual_dims, expected_dims {{key1, 3}, {key2, 3}}; weighted_sum_.dims(actual_dims); EXPECT(actual_dims == expected_dims); diff --git a/gtsam/nonlinear/tests/testLinearContainerFactor.cpp b/gtsam/nonlinear/tests/testLinearContainerFactor.cpp index a5015546f5..9bd9ace98d 100644 --- a/gtsam/nonlinear/tests/testLinearContainerFactor.cpp +++ b/gtsam/nonlinear/tests/testLinearContainerFactor.cpp @@ -15,10 +15,7 @@ #include #include -#include - using namespace std; -using namespace boost::assign; using namespace gtsam; const 
gtsam::noiseModel::Diagonal::shared_ptr diag_model2 = noiseModel::Diagonal::Sigmas(Vector2(1.0, 1.0)); @@ -353,8 +350,8 @@ TEST(TestLinearContainerFactor, Rekey) { // For extra fun lets try linearizing this LCF gtsam::Values linearization_pt_rekeyed; - for (auto key_val : linearization_pt) { - linearization_pt_rekeyed.insert(key_map.at(key_val.key), key_val.value); + for (auto key : linearization_pt.keys()) { + linearization_pt_rekeyed.insert(key_map.at(key), linearization_pt.at(key)); } // Check independent values since we don't want to unnecessarily sort diff --git a/gtsam/nonlinear/tests/testSerializationNonlinear.cpp b/gtsam/nonlinear/tests/testSerializationNonlinear.cpp index 4a73cbb0b4..f402656c12 100644 --- a/gtsam/nonlinear/tests/testSerializationNonlinear.cpp +++ b/gtsam/nonlinear/tests/testSerializationNonlinear.cpp @@ -107,8 +107,60 @@ TEST (Serialization, TemplatedValues) { EXPECT(equalsBinary(values)); } -TEST(Serialization, ISAM2) { +/** + * Test deserializing from a known serialization generated by code from commit + * 0af17f438f62f4788f3a572ecd36d06d224fd1e1 (>4.2a7) + * We only test that deserialization matches since + * (1) that's the main backward compatibility requirement and + * (2) serialized string depends on boost version + * Also note: we don't run this test when quaternions or TBB are enabled since + * serialization structures are different and the serialized strings/xml are + * hard-coded in this test. 
+ */ +TEST(Serialization, NoiseModelFactor1_backwards_compatibility) { + PriorFactor factor( + 12345, Pose3(Rot3::RzRyRx(Vector3(1., 2., 3.)), Point3(4., 5., 6.)), + noiseModel::Unit::Create(6)); + + // roundtrip (sanity check) + PriorFactor factor_deserialized_str_2 = PriorFactor(); + roundtrip(factor, factor_deserialized_str_2); + EXPECT(assert_equal(factor, factor_deserialized_str_2)); + +#if !defined(GTSAM_USE_QUATERNIONS) && !defined(GTSAM_USE_TBB) + // Deserialize string + std::string serialized_str = + "22 serialization::archive 15 1 0\n" + "0 0 0 0 0 0 0 1 0 12345 0 1 6 21 gtsam_noiseModel_Unit 1 0\n" + "1 1 0\n" + "2 1 0\n" + "3 0 0 0 0 6 0 1 0 0 0 6 1.00000000000000000e+00 1.00000000000000000e+00 " + "1.00000000000000000e+00 1.00000000000000000e+00 1.00000000000000000e+00 " + "1.00000000000000000e+00 6 1.00000000000000000e+00 " + "1.00000000000000000e+00 1.00000000000000000e+00 1.00000000000000000e+00 " + "1.00000000000000000e+00 1.00000000000000000e+00 1.00000000000000000e+00 " + "1.00000000000000000e+00 0 0 0 0 4.11982245665682978e-01 " + "-8.33737651774156818e-01 -3.67630462924899259e-01 " + "-5.87266449276209815e-02 -4.26917621276207360e-01 " + "9.02381585483330806e-01 -9.09297426825681709e-01 " + "-3.50175488374014632e-01 -2.24845095366152908e-01 0 0 " + "4.00000000000000000e+00 5.00000000000000000e+00 " + "6.00000000000000000e+00\n"; + + PriorFactor factor_deserialized_str = PriorFactor(); + deserializeFromString(serialized_str, factor_deserialized_str); + EXPECT(assert_equal(factor, factor_deserialized_str)); + + // Deserialize XML + PriorFactor factor_deserialized_xml = PriorFactor(); + deserializeFromXMLFile(GTSAM_SOURCE_TREE_DATASET_DIR + "/../../gtsam/nonlinear/tests/priorFactor.xml", + factor_deserialized_xml); + EXPECT(assert_equal(factor, factor_deserialized_xml)); +#endif +} +TEST(Serialization, ISAM2) { gtsam::ISAM2Params parameters; gtsam::ISAM2 solver(parameters); gtsam::NonlinearFactorGraph graph; diff --git 
a/gtsam/nonlinear/tests/testValues.cpp b/gtsam/nonlinear/tests/testValues.cpp index bed2a8af93..758d9a5a32 100644 --- a/gtsam/nonlinear/tests/testValues.cpp +++ b/gtsam/nonlinear/tests/testValues.cpp @@ -25,15 +25,11 @@ #include #include -#include // for operator += -#include -#include #include #include #include #include -using namespace boost::assign; using namespace std::placeholders; using namespace gtsam; using namespace std; @@ -199,6 +195,14 @@ TEST(Values, basic_functions) values.insert(6, M1); values.insert(8, M2); + size_t count = 0; + for (const auto& it : values) { + count += 1; + if (it.key == 2 || it.key == 4) EXPECT_LONGS_EQUAL(3, it.value.dim()); + if (it.key == 6 || it.key == 8) EXPECT_LONGS_EQUAL(6, it.value.dim()); + } + EXPECT_LONGS_EQUAL(4, count); + // find EXPECT_LONGS_EQUAL(4, values.find(4)->key); EXPECT_LONGS_EQUAL(4, values_c.find(4)->key); @@ -214,7 +218,6 @@ TEST(Values, basic_functions) EXPECT_LONGS_EQUAL(6, values_c.upper_bound(4)->key); EXPECT_LONGS_EQUAL(4, values.upper_bound(3)->key); EXPECT_LONGS_EQUAL(4, values_c.upper_bound(3)->key); - } /* ************************************************************************* */ @@ -224,9 +227,8 @@ TEST(Values, retract_full) config0.insert(key1, Vector3(1.0, 2.0, 3.0)); config0.insert(key2, Vector3(5.0, 6.0, 7.0)); - VectorValues delta = pair_list_of - (key1, Vector3(1.0, 1.1, 1.2)) - (key2, Vector3(1.3, 1.4, 1.5)); + const VectorValues delta{{key1, Vector3(1.0, 1.1, 1.2)}, + {key2, Vector3(1.3, 1.4, 1.5)}}; Values expected; expected.insert(key1, Vector3(2.0, 3.1, 4.2)); @@ -243,8 +245,7 @@ TEST(Values, retract_partial) config0.insert(key1, Vector3(1.0, 2.0, 3.0)); config0.insert(key2, Vector3(5.0, 6.0, 7.0)); - VectorValues delta = pair_list_of - (key2, Vector3(1.3, 1.4, 1.5)); + const VectorValues delta{{key2, Vector3(1.3, 1.4, 1.5)}}; Values expected; expected.insert(key1, Vector3(1.0, 2.0, 3.0)); @@ -254,6 +255,24 @@ TEST(Values, retract_partial) CHECK(assert_equal(expected, 
Values(config0, delta))); } +/* ************************************************************************* */ +TEST(Values, retract_masked) +{ + Values config0; + config0.insert(key1, Vector3(1.0, 2.0, 3.0)); + config0.insert(key2, Vector3(5.0, 6.0, 7.0)); + + const VectorValues delta{{key1, Vector3(1.0, 1.1, 1.2)}, + {key2, Vector3(1.3, 1.4, 1.5)}}; + + Values expected; + expected.insert(key1, Vector3(1.0, 2.0, 3.0)); + expected.insert(key2, Vector3(6.3, 7.4, 8.5)); + + config0.retractMasked(delta, {key2}); + CHECK(assert_equal(expected, config0)); +} + /* ************************************************************************* */ TEST(Values, equals) { @@ -279,9 +298,8 @@ TEST(Values, localCoordinates) valuesA.insert(key1, Vector3(1.0, 2.0, 3.0)); valuesA.insert(key2, Vector3(5.0, 6.0, 7.0)); - VectorValues expDelta = pair_list_of - (key1, Vector3(0.1, 0.2, 0.3)) - (key2, Vector3(0.4, 0.5, 0.6)); + VectorValues expDelta{{key1, Vector3(0.1, 0.2, 0.3)}, + {key2, Vector3(0.4, 0.5, 0.6)}}; Values valuesB = valuesA.retract(expDelta); @@ -350,6 +368,7 @@ TEST(Values, filter) { values.insert(2, pose2); values.insert(3, pose3); +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 // Filter by key int i = 0; Values::Filtered filtered = values.filter(std::bind(std::greater_equal(), std::placeholders::_1, 2)); @@ -402,8 +421,6 @@ TEST(Values, filter) { ++ i; } EXPECT_LONGS_EQUAL(2, (long)i); - EXPECT_LONGS_EQUAL(2, (long)values.count()); - EXPECT_LONGS_EQUAL(2, (long)values.count()); // construct a values with the view Values actualSubValues2(pose_filtered); @@ -411,6 +428,16 @@ TEST(Values, filter) { expectedSubValues2.insert(1, pose1); expectedSubValues2.insert(3, pose3); EXPECT(assert_equal(expectedSubValues2, actualSubValues2)); +#endif + + // Test counting by type. + EXPECT_LONGS_EQUAL(2, (long)values.count()); + EXPECT_LONGS_EQUAL(2, (long)values.count()); + + // Filter by type using extract. 
+ auto extracted_pose3s = values.extract(); + EXPECT_LONGS_EQUAL(2, (long)extracted_pose3s.size()); + } /* ************************************************************************* */ @@ -426,6 +453,7 @@ TEST(Values, Symbol_filter) { values.insert(X(2), pose2); values.insert(Symbol('y', 3), pose3); +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 int i = 0; for(const auto key_value: values.filter(Symbol::ChrTest('y'))) { if(i == 0) { @@ -440,6 +468,12 @@ TEST(Values, Symbol_filter) { ++ i; } LONGS_EQUAL(2, (long)i); +#endif + +// Test extract with filter on symbol: + auto extracted_pose3s = values.extract(Symbol::ChrTest('y')); + EXPECT_LONGS_EQUAL(2, (long)extracted_pose3s.size()); + } /* ************************************************************************* */ diff --git a/gtsam/nonlinear/tests/testWhiteNoiseFactor.cpp b/gtsam/nonlinear/tests/testWhiteNoiseFactor.cpp index 877c9adbcf..c5ffd1ff79 100644 --- a/gtsam/nonlinear/tests/testWhiteNoiseFactor.cpp +++ b/gtsam/nonlinear/tests/testWhiteNoiseFactor.cpp @@ -18,8 +18,6 @@ #include #include -#include // for operator += -using namespace boost::assign; using namespace gtsam; using namespace std; diff --git a/gtsam/nonlinear/utilities.h b/gtsam/nonlinear/utilities.h index d2b38d3743..985e1a5f4e 100644 --- a/gtsam/nonlinear/utilities.h +++ b/gtsam/nonlinear/utilities.h @@ -90,19 +90,19 @@ KeySet createKeySet(std::string s, const Vector& I) { /// Extract all Point2 values into a single matrix [x y] Matrix extractPoint2(const Values& values) { - Values::ConstFiltered points = values.filter(); + const auto points = values.extract(); // Point2 is aliased as a gtsam::Vector in the wrapper - Values::ConstFiltered points2 = values.filter(); + const auto points2 = values.extract(); Matrix result(points.size() + points2.size(), 2); size_t j = 0; for (const auto& key_value : points) { - result.row(j++) = key_value.value; + result.row(j++) = key_value.second; } for (const auto& key_value : points2) { - if (key_value.value.rows() == 
2) { - result.row(j++) = key_value.value; + if (key_value.second.rows() == 2) { + result.row(j++) = key_value.second; } } return result; @@ -110,19 +110,19 @@ Matrix extractPoint2(const Values& values) { /// Extract all Point3 values into a single matrix [x y z] Matrix extractPoint3(const Values& values) { - Values::ConstFiltered points = values.filter(); + const auto points = values.extract(); // Point3 is aliased as a gtsam::Vector in the wrapper - Values::ConstFiltered points2 = values.filter(); + const auto points2 = values.extract(); Matrix result(points.size() + points2.size(), 3); size_t j = 0; for (const auto& key_value : points) { - result.row(j++) = key_value.value; + result.row(j++) = key_value.second; } for (const auto& key_value : points2) { - if (key_value.value.rows() == 3) { - result.row(j++) = key_value.value; + if (key_value.second.rows() == 3) { + result.row(j++) = key_value.second; } } return result; @@ -130,34 +130,40 @@ Matrix extractPoint3(const Values& values) { /// Extract all Pose3 values Values allPose2s(const Values& values) { - return values.filter(); + Values result; + for(const auto& key_value: values.extract()) + result.insert(key_value.first, key_value.second); + return result; } /// Extract all Pose2 values into a single matrix [x y theta] Matrix extractPose2(const Values& values) { - Values::ConstFiltered poses = values.filter(); + const auto poses = values.extract(); Matrix result(poses.size(), 3); size_t j = 0; for(const auto& key_value: poses) - result.row(j++) << key_value.value.x(), key_value.value.y(), key_value.value.theta(); + result.row(j++) << key_value.second.x(), key_value.second.y(), key_value.second.theta(); return result; } /// Extract all Pose3 values Values allPose3s(const Values& values) { - return values.filter(); + Values result; + for(const auto& key_value: values.extract()) + result.insert(key_value.first, key_value.second); + return result; } /// Extract all Pose3 values into a single matrix [r11 r12 r13 r21 
r22 r23 r31 r32 r33 x y z] Matrix extractPose3(const Values& values) { - Values::ConstFiltered poses = values.filter(); + const auto poses = values.extract(); Matrix result(poses.size(), 12); size_t j = 0; for(const auto& key_value: poses) { - result.row(j).segment(0, 3) << key_value.value.rotation().matrix().row(0); - result.row(j).segment(3, 3) << key_value.value.rotation().matrix().row(1); - result.row(j).segment(6, 3) << key_value.value.rotation().matrix().row(2); - result.row(j).tail(3) = key_value.value.translation(); + result.row(j).segment(0, 3) << key_value.second.rotation().matrix().row(0); + result.row(j).segment(3, 3) << key_value.second.rotation().matrix().row(1); + result.row(j).segment(6, 3) << key_value.second.rotation().matrix().row(2); + result.row(j).tail(3) = key_value.second.translation(); j++; } return result; @@ -172,20 +178,19 @@ Matrix extractPose3(const Values& values) { /// variables x1, x2, ..., x200 of type Vector each 5-dimensional, will return a /// 200x5 matrix with row i containing xi. 
Matrix extractVectors(const Values& values, char c) { - Values::ConstFiltered vectors = - values.filter(Symbol::ChrTest(c)); + const auto vectors = values.extract(Symbol::ChrTest(c)); if (vectors.size() == 0) { return Matrix(); } - auto dim = vectors.begin()->value.size(); + auto dim = vectors.begin()->second.size(); Matrix result(vectors.size(), dim); Eigen::Index rowi = 0; for (const auto& kv : vectors) { - if (kv.value.size() != dim) { + if (kv.second.size() != dim) { throw std::runtime_error( "Tried to extract different-sized vectors into a single matrix"); } - result.row(rowi) = kv.value; + result.row(rowi) = kv.second; ++rowi; } return result; @@ -196,14 +201,14 @@ void perturbPoint2(Values& values, double sigma, int32_t seed = 42u) { noiseModel::Isotropic::shared_ptr model = noiseModel::Isotropic::Sigma(2, sigma); Sampler sampler(model, seed); - for (const auto& key_value : values.filter()) { - values.update(key_value.key, - key_value.value + Point2(sampler.sample())); + for (const auto& key_value : values.extract()) { + values.update(key_value.first, + key_value.second + Point2(sampler.sample())); } - for (const auto& key_value : values.filter()) { - if (key_value.value.rows() == 2) { - values.update(key_value.key, - key_value.value + Point2(sampler.sample())); + for (const auto& key_value : values.extract()) { + if (key_value.second.rows() == 2) { + values.update(key_value.first, + key_value.second + Point2(sampler.sample())); } } } @@ -214,8 +219,8 @@ void perturbPose2(Values& values, double sigmaT, double sigmaR, int32_t seed = noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Sigmas( Vector3(sigmaT, sigmaT, sigmaR)); Sampler sampler(model, seed); - for(const auto& key_value: values.filter()) { - values.update(key_value.key, key_value.value.retract(sampler.sample())); + for(const auto& key_value: values.extract()) { + values.update(key_value.first, key_value.second.retract(sampler.sample())); } } @@ -224,14 +229,14 @@ void 
perturbPoint3(Values& values, double sigma, int32_t seed = 42u) { noiseModel::Isotropic::shared_ptr model = noiseModel::Isotropic::Sigma(3, sigma); Sampler sampler(model, seed); - for (const auto& key_value : values.filter()) { - values.update(key_value.key, - key_value.value + Point3(sampler.sample())); + for (const auto& key_value : values.extract()) { + values.update(key_value.first, + key_value.second + Point3(sampler.sample())); } - for (const auto& key_value : values.filter()) { - if (key_value.value.rows() == 3) { - values.update(key_value.key, - key_value.value + Point3(sampler.sample())); + for (const auto& key_value : values.extract()) { + if (key_value.second.rows() == 3) { + values.update(key_value.first, + key_value.second + Point3(sampler.sample())); } } } diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp index 58e98ebfa9..ea4171238a 100644 --- a/gtsam/sfm/ShonanAveraging.cpp +++ b/gtsam/sfm/ShonanAveraging.cpp @@ -207,9 +207,9 @@ Matrix ShonanAveraging::StiefelElementMatrix(const Values &values) { const size_t N = values.size(); const size_t p = values.at(0).rows(); Matrix S(p, N * d); - for (const auto it : values.filter()) { - S.middleCols(it.key * d) = - it.value.matrix().leftCols(); // project Qj to Stiefel + for (const auto& it : values.extract()) { + S.middleCols(it.first * d) = + it.second.matrix().leftCols(); // project Qj to Stiefel } return S; } @@ -218,11 +218,11 @@ Matrix ShonanAveraging::StiefelElementMatrix(const Values &values) { template <> Values ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const { Values result; - for (const auto it : values.filter()) { - assert(it.value.rows() == p); - const auto &M = it.value.matrix(); + for (const auto& it : values.extract()) { + assert(it.second.rows() == p); + const auto &M = it.second.matrix(); const Rot2 R = Rot2::atan2(M(1, 0), M(0, 0)); - result.insert(it.key, R); + result.insert(it.first, R); } return result; } @@ -230,11 +230,11 @@ Values 
ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const { template <> Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const { Values result; - for (const auto it : values.filter()) { - assert(it.value.rows() == p); - const auto &M = it.value.matrix(); + for (const auto& it : values.extract()) { + assert(it.second.rows() == p); + const auto &M = it.second.matrix(); const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>()); - result.insert(it.key, R); + result.insert(it.first, R); } return result; } @@ -326,8 +326,8 @@ double ShonanAveraging::cost(const Values &values) const { } // Finally, project each dxd rotation block to SO(d) Values result; - for (const auto it : values.filter()) { - result.insert(it.key, SO(it.value.matrix())); + for (const auto& it : values.extract()) { + result.insert(it.first, SO(it.second.matrix())); } return graph.error(result); } @@ -958,7 +958,7 @@ static BinaryMeasurement convertPose2ToBinaryMeasurementRot2( // the (2,2) entry of Pose2's covariance corresponds to Rot2's covariance // because the tangent space of Pose2 is ordered as (vx, vy, w) auto model = noiseModel::Isotropic::Variance(1, M(2, 2)); - return BinaryMeasurement(f->key1(), f->key2(), f->measured().rotation(), + return BinaryMeasurement(f->key<1>(), f->key<2>(), f->measured().rotation(), model); } @@ -1006,7 +1006,7 @@ static BinaryMeasurement convert( // the upper-left 3x3 sub-block of Pose3's covariance corresponds to Rot3's covariance // because the tangent space of Pose3 is ordered as (w,T) where w and T are both Vector3's auto model = noiseModel::Gaussian::Covariance(M.block<3, 3>(0, 0)); - return BinaryMeasurement(f->key1(), f->key2(), f->measured().rotation(), + return BinaryMeasurement(f->key<1>(), f->key<2>(), f->measured().rotation(), model); } diff --git a/gtsam/sfm/ShonanAveraging.h b/gtsam/sfm/ShonanAveraging.h index e035da4c78..b40916828f 100644 --- a/gtsam/sfm/ShonanAveraging.h +++ b/gtsam/sfm/ShonanAveraging.h @@ -366,8 
+366,8 @@ class GTSAM_EXPORT ShonanAveraging { template static Values LiftTo(size_t p, const Values &values) { Values result; - for (const auto it : values.filter()) { - result.insert(it.key, SOn::Lift(p, it.value.matrix())); + for (const auto it : values.extract()) { + result.insert(it.first, SOn::Lift(p, it.second.matrix())); } return result; } diff --git a/gtsam/sfm/ShonanFactor.cpp b/gtsam/sfm/ShonanFactor.cpp index b911fb5a47..a48b6e6fa7 100644 --- a/gtsam/sfm/ShonanFactor.cpp +++ b/gtsam/sfm/ShonanFactor.cpp @@ -35,7 +35,7 @@ template ShonanFactor::ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, const SharedNoiseModel &model, const boost::shared_ptr &G) - : NoiseModelFactor2(ConvertNoiseModel(model, p * d), j1, j2), + : NoiseModelFactorN(ConvertNoiseModel(model, p * d), j1, j2), M_(R12.matrix()), // d*d in all cases p_(p), // 4 for SO(4) pp_(p * p), // 16 for SO(4) @@ -57,8 +57,8 @@ ShonanFactor::ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, template void ShonanFactor::print(const std::string &s, const KeyFormatter &keyFormatter) const { - std::cout << s << "ShonanFactor<" << p_ << ">(" << keyFormatter(key1()) << "," - << keyFormatter(key2()) << ")\n"; + std::cout << s << "ShonanFactor<" << p_ << ">(" << keyFormatter(key<1>()) << "," + << keyFormatter(key<2>()) << ")\n"; traits::Print(M_, " M: "); noiseModel_->print(" noise model: "); } @@ -68,7 +68,7 @@ template bool ShonanFactor::equals(const NonlinearFactor &expected, double tol) const { auto e = dynamic_cast(&expected); - return e != nullptr && NoiseModelFactor2::equals(*e, tol) && + return e != nullptr && NoiseModelFactorN::equals(*e, tol) && p_ == e->p_ && M_ == e->M_; } diff --git a/gtsam/sfm/ShonanFactor.h b/gtsam/sfm/ShonanFactor.h index 3c43c2c52a..78cc397656 100644 --- a/gtsam/sfm/ShonanFactor.h +++ b/gtsam/sfm/ShonanFactor.h @@ -33,7 +33,7 @@ namespace gtsam { * the SO(p) matrices down to a Stiefel manifold of p*d matrices. 
*/ template -class GTSAM_EXPORT ShonanFactor : public NoiseModelFactor2 { +class GTSAM_EXPORT ShonanFactor : public NoiseModelFactorN { Matrix M_; ///< measured rotation between R1 and R2 size_t p_, pp_; ///< dimensionality constants boost::shared_ptr G_; ///< matrix of vectorized generators @@ -66,7 +66,7 @@ class GTSAM_EXPORT ShonanFactor : public NoiseModelFactor2 { double tol = 1e-9) const override; /// @} - /// @name NoiseModelFactor2 methods + /// @name NoiseModelFactorN methods /// @{ /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] diff --git a/gtsam/sfm/ShonanGaugeFactor.h b/gtsam/sfm/ShonanGaugeFactor.h index d6240b1c42..d814aafa10 100644 --- a/gtsam/sfm/ShonanGaugeFactor.h +++ b/gtsam/sfm/ShonanGaugeFactor.h @@ -22,8 +22,6 @@ #include #include -#include - namespace gtsam { /** * The ShonanGaugeFactor creates a constraint on a single SO(n) to avoid moving @@ -59,7 +57,7 @@ class GTSAM_EXPORT ShonanGaugeFactor : public NonlinearFactor { */ ShonanGaugeFactor(Key key, size_t p, size_t d = 3, boost::optional gamma = boost::none) - : NonlinearFactor(boost::assign::cref_list_of<1>(key)) { + : NonlinearFactor(KeyVector{key}) { if (p < d) { throw std::invalid_argument("ShonanGaugeFactor must have p>=d."); } diff --git a/gtsam/sfm/TranslationFactor.h b/gtsam/sfm/TranslationFactor.h index 735746b3a0..8850d7d2d0 100644 --- a/gtsam/sfm/TranslationFactor.h +++ b/gtsam/sfm/TranslationFactor.h @@ -39,9 +39,9 @@ namespace gtsam { * * */ -class TranslationFactor : public NoiseModelFactor2 { +class TranslationFactor : public NoiseModelFactorN { private: - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; Point3 measured_w_aZb_; public: diff --git a/gtsam/slam/BetweenFactor.h b/gtsam/slam/BetweenFactor.h index dc72f07b22..d420e2b54d 100644 --- a/gtsam/slam/BetweenFactor.h +++ b/gtsam/slam/BetweenFactor.h @@ -37,7 +37,7 @@ namespace gtsam { * @ingroup slam */ template - class BetweenFactor: public NoiseModelFactor2 { + class 
BetweenFactor: public NoiseModelFactorN { // Check that VALUE type is a testable Lie group BOOST_CONCEPT_ASSERT((IsTestable)); @@ -50,7 +50,7 @@ namespace gtsam { private: typedef BetweenFactor This; - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; VALUE measured_; /** The measurement */ @@ -101,7 +101,7 @@ namespace gtsam { } /// @} - /// @name NoiseModelFactor2 methods + /// @name NoiseModelFactorN methods /// @{ /// evaluate error, returns vector of errors size of tangent space @@ -136,6 +136,7 @@ namespace gtsam { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor2", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(measured_); diff --git a/gtsam/slam/BoundingConstraint.h b/gtsam/slam/BoundingConstraint.h index c09d4136d4..bd20668d84 100644 --- a/gtsam/slam/BoundingConstraint.h +++ b/gtsam/slam/BoundingConstraint.h @@ -30,9 +30,9 @@ namespace gtsam { * @ingroup slam */ template -struct BoundingConstraint1: public NoiseModelFactor1 { +struct BoundingConstraint1: public NoiseModelFactorN { typedef VALUE X; - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; typedef boost::shared_ptr > shared_ptr; double threshold_; @@ -85,6 +85,7 @@ struct BoundingConstraint1: public NoiseModelFactor1 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(threshold_); @@ -97,11 +98,11 @@ struct BoundingConstraint1: public NoiseModelFactor1 { * to implement for specific systems */ template -struct BoundingConstraint2: public NoiseModelFactor2 { +struct 
BoundingConstraint2: public NoiseModelFactorN { typedef VALUE1 X1; typedef VALUE2 X2; - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; typedef boost::shared_ptr > shared_ptr; double threshold_; @@ -158,6 +159,7 @@ struct BoundingConstraint2: public NoiseModelFactor2 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor2", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(threshold_); diff --git a/gtsam/slam/EssentialMatrixConstraint.cpp b/gtsam/slam/EssentialMatrixConstraint.cpp index 5397c2e57d..c1f8b286bd 100644 --- a/gtsam/slam/EssentialMatrixConstraint.cpp +++ b/gtsam/slam/EssentialMatrixConstraint.cpp @@ -27,8 +27,8 @@ namespace gtsam { /* ************************************************************************* */ void EssentialMatrixConstraint::print(const std::string& s, const KeyFormatter& keyFormatter) const { - std::cout << s << "EssentialMatrixConstraint(" << keyFormatter(this->key1()) - << "," << keyFormatter(this->key2()) << ")\n"; + std::cout << s << "EssentialMatrixConstraint(" << keyFormatter(this->key<1>()) + << "," << keyFormatter(this->key<2>()) << ")\n"; measuredE_.print(" measured: "); this->noiseModel_->print(" noise model: "); } diff --git a/gtsam/slam/EssentialMatrixConstraint.h b/gtsam/slam/EssentialMatrixConstraint.h index dd3c522861..9d84dfa7be 100644 --- a/gtsam/slam/EssentialMatrixConstraint.h +++ b/gtsam/slam/EssentialMatrixConstraint.h @@ -27,12 +27,12 @@ namespace gtsam { * Binary factor between two Pose3 variables induced by an EssentialMatrix measurement * @ingroup slam */ -class GTSAM_EXPORT EssentialMatrixConstraint: public NoiseModelFactor2 { +class GTSAM_EXPORT EssentialMatrixConstraint: public NoiseModelFactorN { private: typedef EssentialMatrixConstraint This; - typedef NoiseModelFactor2 
Base; + typedef NoiseModelFactorN Base; EssentialMatrix measuredE_; /** The measurement is an essential matrix */ @@ -93,6 +93,7 @@ class GTSAM_EXPORT EssentialMatrixConstraint: public NoiseModelFactor2 void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor2", boost::serialization::base_object(*this)); diff --git a/gtsam/slam/EssentialMatrixFactor.h b/gtsam/slam/EssentialMatrixFactor.h index 5997ad2247..8a0277a459 100644 --- a/gtsam/slam/EssentialMatrixFactor.h +++ b/gtsam/slam/EssentialMatrixFactor.h @@ -31,10 +31,10 @@ namespace gtsam { /** * Factor that evaluates epipolar error p'Ep for given essential matrix */ -class EssentialMatrixFactor : public NoiseModelFactor1 { +class EssentialMatrixFactor : public NoiseModelFactorN { Vector3 vA_, vB_; ///< Homogeneous versions, in ideal coordinates - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; typedef EssentialMatrixFactor This; public: @@ -106,12 +106,12 @@ class EssentialMatrixFactor : public NoiseModelFactor1 { * in image 2 is perfect, and returns re-projection error in image 1 */ class EssentialMatrixFactor2 - : public NoiseModelFactor2 { + : public NoiseModelFactorN { Point3 dP1_; ///< 3D point corresponding to measurement in image 1 Point2 pn_; ///< Measurement in image 2, in ideal coordinates double f_; ///< approximate conversion factor for error scaling - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; typedef EssentialMatrixFactor2 This; public: @@ -321,11 +321,11 @@ class EssentialMatrixFactor3 : public EssentialMatrixFactor2 { */ template class EssentialMatrixFactor4 - : public NoiseModelFactor2 { + : public NoiseModelFactorN { private: Point2 pA_, pB_; ///< points in pixel coordinates - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; typedef EssentialMatrixFactor4 This; static constexpr int DimK = 
FixedDimension::value; diff --git a/gtsam/slam/FrobeniusFactor.h b/gtsam/slam/FrobeniusFactor.h index 05e23ce6de..2e7e28d033 100644 --- a/gtsam/slam/FrobeniusFactor.h +++ b/gtsam/slam/FrobeniusFactor.h @@ -48,7 +48,7 @@ ConvertNoiseModel(const SharedNoiseModel &model, size_t n, * element of SO(3) or SO(4). */ template -class FrobeniusPrior : public NoiseModelFactor1 { +class FrobeniusPrior : public NoiseModelFactorN { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; using MatrixNN = typename Rot::MatrixNN; Eigen::Matrix vecM_; ///< vectorized matrix to approximate @@ -59,7 +59,7 @@ class FrobeniusPrior : public NoiseModelFactor1 { /// Constructor FrobeniusPrior(Key j, const MatrixNN& M, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor1(ConvertNoiseModel(model, Dim), j) { + : NoiseModelFactorN(ConvertNoiseModel(model, Dim), j) { vecM_ << Eigen::Map(M.data(), Dim, 1); } @@ -75,13 +75,13 @@ class FrobeniusPrior : public NoiseModelFactor1 { * The template argument can be any fixed-size SO. */ template -class FrobeniusFactor : public NoiseModelFactor2 { +class FrobeniusFactor : public NoiseModelFactorN { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; public: /// Constructor FrobeniusFactor(Key j1, Key j2, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor2(ConvertNoiseModel(model, Dim), j1, + : NoiseModelFactorN(ConvertNoiseModel(model, Dim), j1, j2) {} /// Error is just Frobenius norm between rotation matrices. @@ -101,7 +101,7 @@ class FrobeniusFactor : public NoiseModelFactor2 { * and in fact only SO3 and SO4 really work, as we need SO::AdjointMap. 
*/ template -class FrobeniusBetweenFactor : public NoiseModelFactor2 { +class FrobeniusBetweenFactor : public NoiseModelFactorN { Rot R12_; ///< measured rotation between R1 and R2 Eigen::Matrix R2hat_H_R1_; ///< fixed derivative of R2hat wrpt R1 @@ -116,7 +116,7 @@ class FrobeniusBetweenFactor : public NoiseModelFactor2 { /// Construct from two keys and measured rotation FrobeniusBetweenFactor(Key j1, Key j2, const Rot& R12, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor2( + : NoiseModelFactorN( ConvertNoiseModel(model, Dim), j1, j2), R12_(R12), R2hat_H_R1_(R12.inverse().AdjointMap()) {} @@ -140,12 +140,12 @@ class FrobeniusBetweenFactor : public NoiseModelFactor2 { bool equals(const NonlinearFactor &expected, double tol = 1e-9) const override { auto e = dynamic_cast(&expected); - return e != nullptr && NoiseModelFactor2::equals(*e, tol) && + return e != nullptr && NoiseModelFactorN::equals(*e, tol) && traits::Equals(this->R12_, e->R12_, tol); } /// @} - /// @name NoiseModelFactor2 methods + /// @name NoiseModelFactorN methods /// @{ /// Error is Frobenius norm between R1*R12 and R2. 
diff --git a/gtsam/slam/GeneralSFMFactor.h b/gtsam/slam/GeneralSFMFactor.h index 23f10de343..37296c0d88 100644 --- a/gtsam/slam/GeneralSFMFactor.h +++ b/gtsam/slam/GeneralSFMFactor.h @@ -57,7 +57,7 @@ namespace gtsam { * @ingroup slam */ template -class GeneralSFMFactor: public NoiseModelFactor2 { +class GeneralSFMFactor: public NoiseModelFactorN { GTSAM_CONCEPT_MANIFOLD_TYPE(CAMERA) GTSAM_CONCEPT_MANIFOLD_TYPE(LANDMARK) @@ -74,7 +74,7 @@ class GeneralSFMFactor: public NoiseModelFactor2 { public: typedef GeneralSFMFactor This;///< typedef for this object - typedef NoiseModelFactor2 Base;///< typedef for the base class + typedef NoiseModelFactorN Base;///< typedef for the base class // shorthand for a smart pointer to a factor typedef boost::shared_ptr shared_ptr; @@ -184,6 +184,7 @@ class GeneralSFMFactor: public NoiseModelFactor2 { friend class boost::serialization::access; template void serialize(Archive & ar, const unsigned int /*version*/) { + // NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor2", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(measured_); @@ -200,7 +201,7 @@ struct traits > : Testable< * Compared to GeneralSFMFactor, it is a ternary-factor because the calibration is isolated from camera.. 
*/ template -class GeneralSFMFactor2: public NoiseModelFactor3 { +class GeneralSFMFactor2: public NoiseModelFactorN { GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION) static const int DimK = FixedDimension::value; @@ -213,7 +214,7 @@ class GeneralSFMFactor2: public NoiseModelFactor3 { typedef GeneralSFMFactor2 This; typedef PinholeCamera Camera;///< typedef for camera type - typedef NoiseModelFactor3 Base;///< typedef for the base class + typedef NoiseModelFactorN Base;///< typedef for the base class // shorthand for a smart pointer to a factor typedef boost::shared_ptr shared_ptr; @@ -285,6 +286,7 @@ class GeneralSFMFactor2: public NoiseModelFactor3 { friend class boost::serialization::access; template void serialize(Archive & ar, const unsigned int /*version*/) { + // NoiseModelFactor3 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor3", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(measured_); diff --git a/gtsam/slam/InitializePose.h b/gtsam/slam/InitializePose.h index f9b99e47e3..79e3fe813e 100644 --- a/gtsam/slam/InitializePose.h +++ b/gtsam/slam/InitializePose.h @@ -62,10 +62,10 @@ static Values computePoses(const Values& initialRot, // Upgrade rotations to full poses Values initialPose; - for (const auto key_value : initialRot) { - Key key = key_value.key; - const auto& rot = initialRot.at(key); - Pose initializedPose = Pose(rot, origin); + for (const auto& key_rot : initialRot.extract()) { + const Key& key = key_rot.first; + const auto& rot = key_rot.second; + const Pose initializedPose(rot, origin); initialPose.insert(key, initializedPose); } @@ -82,14 +82,14 @@ static Values computePoses(const Values& initialRot, params.setVerbosity("TERMINATION"); } GaussNewtonOptimizer optimizer(*posegraph, initialPose, params); - Values GNresult = optimizer.optimize(); + const Values GNresult = optimizer.optimize(); // put into Values structure Values estimate; - for (const auto key_value : 
GNresult) { - Key key = key_value.key; + for (const auto& key_pose : GNresult.extract()) { + const Key& key = key_pose.first; if (key != kAnchorKey) { - const Pose& pose = GNresult.at(key); + const Pose& pose = key_pose.second; estimate.insert(key, pose); } } diff --git a/gtsam/slam/InitializePose3.cpp b/gtsam/slam/InitializePose3.cpp index 43404df537..75c33efa6f 100644 --- a/gtsam/slam/InitializePose3.cpp +++ b/gtsam/slam/InitializePose3.cpp @@ -122,12 +122,12 @@ Values InitializePose3::computeOrientationsGradient( gttic(InitializePose3_computeOrientationsGradient); // this works on the inverse rotations, according to Tron&Vidal,2011 - Values inverseRot; - inverseRot.insert(initialize::kAnchorKey, Rot3()); - for(const auto key_value: givenGuess) { - Key key = key_value.key; - const Pose3& pose = givenGuess.at(key); - inverseRot.insert(key, pose.rotation().inverse()); + std::map inverseRot; + inverseRot.emplace(initialize::kAnchorKey, Rot3()); + for(const auto& key_pose: givenGuess.extract()) { + const Key& key = key_pose.first; + const Pose3& pose = key_pose.second; + inverseRot.emplace(key, pose.rotation().inverse()); } // Create the map of edges incident on each node @@ -138,10 +138,8 @@ Values InitializePose3::computeOrientationsGradient( // calculate max node degree & allocate gradient size_t maxNodeDeg = 0; - VectorValues grad; - for(const auto key_value: inverseRot) { - Key key = key_value.key; - grad.insert(key,Z_3x1); + for (const auto& key_R : inverseRot) { + const Key& key = key_R.first; size_t currNodeDeg = (adjEdgesMap.at(key)).size(); if(currNodeDeg > maxNodeDeg) maxNodeDeg = currNodeDeg; @@ -162,28 +160,29 @@ Values InitializePose3::computeOrientationsGradient( ////////////////////////////////////////////////////////////////////////// // compute the gradient at each node maxGrad = 0; - for (const auto key_value : inverseRot) { - Key key = key_value.key; + VectorValues grad; + for (const auto& key_R : inverseRot) { + const Key& key = key_R.first; + 
const Rot3& Ri = key_R.second; Vector gradKey = Z_3x1; // collect the gradient for each edge incident on key for (const size_t& factorId : adjEdgesMap.at(key)) { - Rot3 Rij = factorId2RotMap.at(factorId); - Rot3 Ri = inverseRot.at(key); + const Rot3& Rij = factorId2RotMap.at(factorId); auto factor = pose3Graph.at(factorId); const auto& keys = factor->keys(); if (key == keys[0]) { Key key1 = keys[1]; - Rot3 Rj = inverseRot.at(key1); + const Rot3& Rj = inverseRot.at(key1); gradKey = gradKey + gradientTron(Ri, Rij * Rj, a, b); } else if (key == keys[1]) { Key key0 = keys[0]; - Rot3 Rj = inverseRot.at(key0); + const Rot3& Rj = inverseRot.at(key0); gradKey = gradKey + gradientTron(Ri, Rij.between(Rj), a, b); } else { cout << "Error in gradient computation" << endl; } } // end of i-th gradient computation - grad.at(key) = stepsize * gradKey; + grad.insert(key, stepsize * gradKey); double normGradKey = (gradKey).norm(); if(normGradKey>maxGrad) @@ -192,8 +191,12 @@ Values InitializePose3::computeOrientationsGradient( ////////////////////////////////////////////////////////////////////////// // update estimates - inverseRot = inverseRot.retract(grad); - + for (auto& key_R : inverseRot) { + const Key& key = key_R.first; + Rot3& Ri = key_R.second; + Ri = Ri.retract(grad.at(key)); + } + ////////////////////////////////////////////////////////////////////////// // check stopping condition if (it>20 && maxGrad < 5e-3) @@ -201,13 +204,13 @@ Values InitializePose3::computeOrientationsGradient( } // enf of gradient iterations // Return correct rotations - const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior + const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior Values estimateRot; - for(const auto key_value: inverseRot) { - Key key = key_value.key; + for (const auto& key_R : inverseRot) { + const Key& key = key_R.first; if (key != 
initialize::kAnchorKey) { - const Rot3& R = inverseRot.at(key); - if(setRefFrame) + const Rot3& R = key_R.second; + if (setRefFrame) estimateRot.insert(key, Rref.compose(R.inverse())); else estimateRot.insert(key, R.inverse()); @@ -228,7 +231,7 @@ void InitializePose3::createSymbolicGraph( Rot3 Rij = pose3Between->measured().rotation(); factorId2RotMap->emplace(factorId, Rij); - Key key1 = pose3Between->key1(); + Key key1 = pose3Between->key<1>(); if (adjEdgesMap->find(key1) != adjEdgesMap->end()) { // key is already in adjEdgesMap->at(key1).push_back(factorId); } else { @@ -236,7 +239,7 @@ void InitializePose3::createSymbolicGraph( edge_id.push_back(factorId); adjEdgesMap->emplace(key1, edge_id); } - Key key2 = pose3Between->key2(); + Key key2 = pose3Between->key<2>(); if (adjEdgesMap->find(key2) != adjEdgesMap->end()) { // key is already in adjEdgesMap->at(key2).push_back(factorId); } else { diff --git a/gtsam/slam/OrientedPlane3Factor.cpp b/gtsam/slam/OrientedPlane3Factor.cpp index d7508f6b8a..7ead4ad115 100644 --- a/gtsam/slam/OrientedPlane3Factor.cpp +++ b/gtsam/slam/OrientedPlane3Factor.cpp @@ -15,8 +15,8 @@ namespace gtsam { void OrientedPlane3Factor::print(const string& s, const KeyFormatter& keyFormatter) const { cout << s << (s == "" ? 
"" : "\n"); - cout << "OrientedPlane3Factor Factor (" << keyFormatter(key1()) << ", " - << keyFormatter(key2()) << ")\n"; + cout << "OrientedPlane3Factor Factor (" << keyFormatter(key<1>()) << ", " + << keyFormatter(key<2>()) << ")\n"; measured_p_.print("Measured Plane"); this->noiseModel_->print(" noise model: "); } diff --git a/gtsam/slam/OrientedPlane3Factor.h b/gtsam/slam/OrientedPlane3Factor.h index 81bb790de7..1550201ecd 100644 --- a/gtsam/slam/OrientedPlane3Factor.h +++ b/gtsam/slam/OrientedPlane3Factor.h @@ -15,10 +15,10 @@ namespace gtsam { /** * Factor to measure a planar landmark from a given pose */ -class GTSAM_EXPORT OrientedPlane3Factor: public NoiseModelFactor2 { +class GTSAM_EXPORT OrientedPlane3Factor: public NoiseModelFactorN { protected: OrientedPlane3 measured_p_; - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; public: /// Constructor @@ -49,10 +49,10 @@ class GTSAM_EXPORT OrientedPlane3Factor: public NoiseModelFactor2 { +class GTSAM_EXPORT OrientedPlane3DirectionPrior : public NoiseModelFactorN { protected: OrientedPlane3 measured_p_; /// measured plane parameters - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; public: typedef OrientedPlane3DirectionPrior This; diff --git a/gtsam/slam/PoseRotationPrior.h b/gtsam/slam/PoseRotationPrior.h index 20f12dbce2..759e66341d 100644 --- a/gtsam/slam/PoseRotationPrior.h +++ b/gtsam/slam/PoseRotationPrior.h @@ -16,11 +16,11 @@ namespace gtsam { template -class PoseRotationPrior : public NoiseModelFactor1 { +class PoseRotationPrior : public NoiseModelFactorN { public: typedef PoseRotationPrior This; - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; typedef POSE Pose; typedef typename POSE::Translation Translation; typedef typename POSE::Rotation Rotation; @@ -92,6 +92,7 @@ class PoseRotationPrior : public NoiseModelFactor1 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // 
NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(measured_); diff --git a/gtsam/slam/PoseTranslationPrior.h b/gtsam/slam/PoseTranslationPrior.h index 5bb3745e9b..e009ace293 100644 --- a/gtsam/slam/PoseTranslationPrior.h +++ b/gtsam/slam/PoseTranslationPrior.h @@ -18,10 +18,10 @@ namespace gtsam { * A prior on the translation part of a pose */ template -class PoseTranslationPrior : public NoiseModelFactor1 { +class PoseTranslationPrior : public NoiseModelFactorN { public: typedef PoseTranslationPrior This; - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; typedef POSE Pose; typedef typename POSE::Translation Translation; typedef typename POSE::Rotation Rotation; @@ -91,6 +91,7 @@ class PoseTranslationPrior : public NoiseModelFactor1 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(measured_); diff --git a/gtsam/slam/ProjectionFactor.h b/gtsam/slam/ProjectionFactor.h index 0fc1c0b54b..c90fc80d58 100644 --- a/gtsam/slam/ProjectionFactor.h +++ b/gtsam/slam/ProjectionFactor.h @@ -37,7 +37,7 @@ namespace gtsam { */ template - class GenericProjectionFactor: public NoiseModelFactor2 { + class GenericProjectionFactor: public NoiseModelFactorN { protected: // Keep a copy of measurement and calibration for I/O @@ -52,7 +52,7 @@ namespace gtsam { public: /// shorthand for base class type - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; /// shorthand for this class typedef GenericProjectionFactor This; diff --git a/gtsam/slam/ReferenceFrameFactor.h b/gtsam/slam/ReferenceFrameFactor.h index 40c8e29b74..383c81cc47 100644 
--- a/gtsam/slam/ReferenceFrameFactor.h +++ b/gtsam/slam/ReferenceFrameFactor.h @@ -54,13 +54,13 @@ P transform_point( * specific classes of landmarks */ template -class ReferenceFrameFactor : public NoiseModelFactor3 { +class ReferenceFrameFactor : public NoiseModelFactorN { protected: /** default constructor for serialization only */ ReferenceFrameFactor() {} public: - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; typedef ReferenceFrameFactor This; typedef POINT Point; diff --git a/gtsam/slam/RotateFactor.h b/gtsam/slam/RotateFactor.h index 9e4091111c..85539e17e5 100644 --- a/gtsam/slam/RotateFactor.h +++ b/gtsam/slam/RotateFactor.h @@ -20,11 +20,11 @@ namespace gtsam { * with z and p measured and predicted angular velocities, and hence * p = iRc * z */ -class RotateFactor: public NoiseModelFactor1 { +class RotateFactor: public NoiseModelFactorN { Point3 p_, z_; ///< Predicted and measured directions, p = iRc * z - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; typedef RotateFactor This; public: @@ -64,11 +64,11 @@ class RotateFactor: public NoiseModelFactor1 { * Factor on unknown rotation iRc that relates two directions c * Directions provide less constraints than a full rotation */ -class RotateDirectionsFactor: public NoiseModelFactor1 { +class RotateDirectionsFactor: public NoiseModelFactorN { Unit3 i_p_, c_z_; ///< Predicted and measured directions, i_p = iRc * c_z - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; typedef RotateDirectionsFactor This; public: diff --git a/gtsam/slam/StereoFactor.h b/gtsam/slam/StereoFactor.h index ca1774e310..1013b22b1d 100644 --- a/gtsam/slam/StereoFactor.h +++ b/gtsam/slam/StereoFactor.h @@ -28,7 +28,7 @@ namespace gtsam { * @ingroup slam */ template -class GenericStereoFactor: public NoiseModelFactor2 { +class GenericStereoFactor: public NoiseModelFactorN { private: // Keep a copy of measurement and calibration for I/O @@ -43,7 +43,7 @@ class GenericStereoFactor: 
public NoiseModelFactor2 { public: // shorthand for base class type - typedef NoiseModelFactor2 Base; ///< typedef for base class + typedef NoiseModelFactorN Base; ///< typedef for base class typedef GenericStereoFactor This; ///< typedef for this class (with templates) typedef boost::shared_ptr shared_ptr; ///< typedef for shared pointer to this object typedef POSE CamPose; ///< typedef for Pose Lie Value type @@ -170,6 +170,7 @@ class GenericStereoFactor: public NoiseModelFactor2 { friend class boost::serialization::access; template void serialize(Archive & ar, const unsigned int /*version*/) { + // NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor2", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(measured_); diff --git a/gtsam/slam/TriangulationFactor.h b/gtsam/slam/TriangulationFactor.h index 80d3230199..f33ba2ca22 100644 --- a/gtsam/slam/TriangulationFactor.h +++ b/gtsam/slam/TriangulationFactor.h @@ -30,7 +30,7 @@ namespace gtsam { * @ingroup slam */ template -class TriangulationFactor: public NoiseModelFactor1 { +class TriangulationFactor: public NoiseModelFactorN { public: @@ -40,7 +40,7 @@ class TriangulationFactor: public NoiseModelFactor1 { protected: /// shorthand for base class type - using Base = NoiseModelFactor1; + using Base = NoiseModelFactorN; /// shorthand for this class using This = TriangulationFactor; diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 511cbd8390..9cb12aefba 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -41,7 +41,6 @@ #include #include -#include #include #include #include @@ -535,7 +534,7 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, graph->push_back(*f); // Insert vertices if pure odometry file - Key key1 = (*f)->key1(), key2 = (*f)->key2(); + Key key1 = (*f)->key<1>(), key2 = (*f)->key<2>(); if (!initial->exists(key1)) initial->insert(key1, Pose2()); if 
(!initial->exists(key2)) @@ -587,9 +586,9 @@ void save2D(const NonlinearFactorGraph &graph, const Values &config, fstream stream(filename.c_str(), fstream::out); // save poses - for (const auto key_value : config) { - const Pose2 &pose = key_value.value.cast(); - stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y() + for (const auto &key_pose : config.extract()) { + const Pose2 &pose = key_pose.second; + stream << "VERTEX2 " << key_pose.first << " " << pose.x() << " " << pose.y() << " " << pose.theta() << endl; } @@ -603,7 +602,7 @@ void save2D(const NonlinearFactorGraph &graph, const Values &config, continue; const Pose2 pose = factor->measured().inverse(); - stream << "EDGE2 " << factor->key2() << " " << factor->key1() << " " + stream << "EDGE2 " << factor->key<2>() << " " << factor->key<1>() << " " << pose.x() << " " << pose.y() << " " << pose.theta() << " " << RR(0, 0) << " " << RR(0, 1) << " " << RR(1, 1) << " " << RR(2, 2) << " " << RR(0, 2) << " " << RR(1, 2) << endl; @@ -636,45 +635,33 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, auto index = [](gtsam::Key key) { return Symbol(key).index(); }; // save 2D poses - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Pose2 &pose = p->value(); - stream << "VERTEX_SE2 " << index(key_value.key) << " " << pose.x() << " " + for (const auto &pair : estimate.extract()) { + const Pose2 &pose = pair.second; + stream << "VERTEX_SE2 " << index(pair.first) << " " << pose.x() << " " << pose.y() << " " << pose.theta() << endl; } // save 3D poses - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Pose3 &pose = p->value(); + for (const auto &pair : estimate.extract()) { + const Pose3 &pose = pair.second; const Point3 t = pose.translation(); const auto q = pose.rotation().toQuaternion(); - stream << "VERTEX_SE3:QUAT " << index(key_value.key) << " 
" << t.x() << " " + stream << "VERTEX_SE3:QUAT " << index(pair.first) << " " << t.x() << " " << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " << q.w() << endl; } // save 2D landmarks - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Point2 &point = p->value(); - stream << "VERTEX_XY " << index(key_value.key) << " " << point.x() << " " + for (const auto &pair : estimate.extract()) { + const Point2 &point = pair.second; + stream << "VERTEX_XY " << index(pair.first) << " " << point.x() << " " << point.y() << endl; } // save 3D landmarks - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Point3 &point = p->value(); - stream << "VERTEX_TRACKXYZ " << index(key_value.key) << " " << point.x() + for (const auto &pair : estimate.extract()) { + const Point3 &point = pair.second; + stream << "VERTEX_TRACKXYZ " << index(pair.first) << " " << point.x() << " " << point.y() << " " << point.z() << endl; } @@ -691,8 +678,8 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, } Matrix3 Info = gaussianModel->R().transpose() * gaussianModel->R(); Pose2 pose = factor->measured(); //.inverse(); - stream << "EDGE_SE2 " << index(factor->key1()) << " " - << index(factor->key2()) << " " << pose.x() << " " << pose.y() + stream << "EDGE_SE2 " << index(factor->key<1>()) << " " + << index(factor->key<2>()) << " " << pose.x() << " " << pose.y() << " " << pose.theta(); for (size_t i = 0; i < 3; i++) { for (size_t j = i; j < 3; j++) { @@ -717,8 +704,8 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, const Pose3 pose3D = factor3D->measured(); const Point3 p = pose3D.translation(); const auto q = pose3D.rotation().toQuaternion(); - stream << "EDGE_SE3:QUAT " << index(factor3D->key1()) << " " - << index(factor3D->key2()) << " " << p.x() << " " << p.y() << " " + stream << "EDGE_SE3:QUAT " 
<< index(factor3D->key<1>()) << " " + << index(factor3D->key<2>()) << " " << p.x() << " " << p.y() << " " << p.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " << q.w(); diff --git a/gtsam/slam/expressions.h b/gtsam/slam/expressions.h index 3b8ea86d37..d1bfab7f2f 100644 --- a/gtsam/slam/expressions.h +++ b/gtsam/slam/expressions.h @@ -12,6 +12,7 @@ #include #include #include +#include #include namespace gtsam { @@ -26,6 +27,10 @@ inline Point2_ transformTo(const Pose2_& x, const Point2_& p) { return Point2_(x, &Pose2::transformTo, p); } +inline Double_ range(const Point2_& p, const Point2_& q) { + return Double_(Range(), p, q); +} + // 3D Geometry typedef Expression Point3_; @@ -33,6 +38,7 @@ typedef Expression Unit3_; typedef Expression Rot3_; typedef Expression Pose3_; typedef Expression Line3_; +typedef Expression OrientedPlane3_; inline Point3_ transformTo(const Pose3_& x, const Point3_& p) { return Point3_(x, &Pose3::transformTo, p); @@ -48,6 +54,15 @@ inline Line3_ transformTo(const Pose3_ &wTc, const Line3_ &wL) { return Line3_(f, wTc, wL); } +inline Pose3_ transformPoseTo(const Pose3_& p, const Pose3_& q) { + return Pose3_(p, &Pose3::transformPoseTo, q); +} + +inline Point3_ normalize(const Point3_& a){ + Point3 (*f)(const Point3 &, OptionalJacobian<3, 3>) = &normalize; + return Point3_(f, a); +} + inline Point3_ cross(const Point3_& a, const Point3_& b) { Point3 (*f)(const Point3 &, const Point3 &, OptionalJacobian<3, 3>, OptionalJacobian<3, 3>) = &cross; @@ -65,16 +80,28 @@ namespace internal { inline Rot3 rotation(const Pose3& pose, OptionalJacobian<3, 6> H) { return pose.rotation(H); } + +inline Point3 translation(const Pose3& pose, OptionalJacobian<3, 6> H) { + return pose.translation(H); +} } // namespace internal inline Rot3_ rotation(const Pose3_& pose) { return Rot3_(internal::rotation, pose); } +inline Point3_ translation(const Pose3_& pose) { + return Point3_(internal::translation, pose); +} + inline Point3_ rotate(const Rot3_& x, const
Point3_& p) { return Point3_(x, &Rot3::rotate, p); } +inline Point3_ point3(const Unit3_& v) { + return Point3_(&Unit3::point3, v); +} + inline Unit3_ rotate(const Rot3_& x, const Unit3_& p) { return Unit3_(x, &Rot3::rotate, p); } @@ -87,6 +114,14 @@ inline Unit3_ unrotate(const Rot3_& x, const Unit3_& p) { return Unit3_(x, &Rot3::unrotate, p); } +inline Double_ distance(const OrientedPlane3_& p) { + return Double_(&OrientedPlane3::distance, p); +} + +inline Unit3_ normal(const OrientedPlane3_& p) { + return Unit3_(&OrientedPlane3::normal, p); +} + // Projection typedef Expression Cal3_S2_; @@ -138,6 +173,11 @@ Point2_ uncalibrate(const Expression& K, const Point2_& xy_hat) { return Point2_(K, &CALIBRATION::uncalibrate, xy_hat); } +template +inline Pose3_ getPose(const Expression > & cam) { + return Pose3_(&PinholeCamera::getPose, cam); +} + /// logmap // TODO(dellaert): Should work but fails because of a type deduction conflict. diff --git a/gtsam/slam/tests/testGeneralSFMFactor.cpp b/gtsam/slam/tests/testGeneralSFMFactor.cpp index e19a41b8df..23ddb82073 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor.cpp @@ -28,10 +28,8 @@ #include #include -#include #include #include -using namespace boost::assign; #include #include @@ -456,8 +454,8 @@ TEST(GeneralSFMFactor, BinaryJacobianFactor) { using namespace noiseModel; Rot2 R = Rot2::fromAngle(0.3); Matrix2 cov = R.matrix() * R.matrix().transpose(); - models += SharedNoiseModel(), Unit::Create(2), // - Isotropic::Sigma(2, 0.5), Constrained::All(2), Gaussian::Covariance(cov); + models = {SharedNoiseModel(), Unit::Create(2), Isotropic::Sigma(2, 0.5), + Constrained::All(2), Gaussian::Covariance(cov)}; } // Now loop over all these noise models diff --git a/gtsam/slam/tests/testInitializePose3.cpp b/gtsam/slam/tests/testInitializePose3.cpp index 995a109fa0..37d65320a5 100644 --- a/gtsam/slam/tests/testInitializePose3.cpp +++ b/gtsam/slam/tests/testInitializePose3.cpp @@ -29,7 
+29,6 @@ using namespace std; using namespace gtsam; -using namespace boost::assign; static Symbol x0('x', 0), x1('x', 1), x2('x', 2), x3('x', 3); static SharedNoiseModel model(noiseModel::Isotropic::Sigma(6, 0.1)); @@ -156,7 +155,7 @@ TEST( InitializePose3, orientationsGradientSymbolicGraph ) { /* *************************************************************************** */ TEST( InitializePose3, singleGradient ) { Rot3 R1 = Rot3(); - Matrix M = Matrix3::Zero(); + Matrix M = Z_3x3; M(0,1) = -1; M(1,0) = 1; M(2,2) = 1; Rot3 R2 = Rot3(M); double a = 6.010534238540223; diff --git a/gtsam/slam/tests/testLago.cpp b/gtsam/slam/tests/testLago.cpp index 781317d7a9..5d429796d1 100644 --- a/gtsam/slam/tests/testLago.cpp +++ b/gtsam/slam/tests/testLago.cpp @@ -31,7 +31,6 @@ using namespace std; using namespace gtsam; -using namespace boost::assign; static Symbol x0('x', 0), x1('x', 1), x2('x', 2), x3('x', 3); static SharedNoiseModel model(noiseModel::Isotropic::Sigma(3, 0.1)); @@ -284,9 +283,10 @@ TEST( Lago, largeGraphNoisy_orientations ) { Values::shared_ptr expected; boost::tie(gmatlab, expected) = readG2o(matlabFile); - for(const auto key_val: *expected){ - Key k = key_val.key; - EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-5)); + for(const auto key_pose: expected->extract()){ + const Key& k = key_pose.first; + const Pose2& pose = key_pose.second; + EXPECT(assert_equal(pose, actual.at(k), 1e-5)); } } @@ -310,9 +310,10 @@ TEST( Lago, largeGraphNoisy ) { Values::shared_ptr expected; boost::tie(gmatlab, expected) = readG2o(matlabFile); - for(const auto key_val: *expected){ - Key k = key_val.key; - EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-2)); + for(const auto key_pose: expected->extract()){ + const Key& k = key_pose.first; + const Pose2& pose = key_pose.second; + EXPECT(assert_equal(pose, actual.at(k), 1e-2)); } } diff --git a/gtsam/slam/tests/testOrientedPlane3Factor.cpp b/gtsam/slam/tests/testOrientedPlane3Factor.cpp index 502c62c11c..bc3f7ce94b 
100644 --- a/gtsam/slam/tests/testOrientedPlane3Factor.cpp +++ b/gtsam/slam/tests/testOrientedPlane3Factor.cpp @@ -25,10 +25,6 @@ #include -#include -#include - -using namespace boost::assign; using namespace std::placeholders; using namespace gtsam; using namespace std; diff --git a/gtsam/slam/tests/testRegularImplicitSchurFactor.cpp b/gtsam/slam/tests/testRegularImplicitSchurFactor.cpp index 09ee61c97b..9f39082a5f 100644 --- a/gtsam/slam/tests/testRegularImplicitSchurFactor.cpp +++ b/gtsam/slam/tests/testRegularImplicitSchurFactor.cpp @@ -27,14 +27,11 @@ #include #include -#include -#include #include #include #include using namespace std; -using namespace boost::assign; using namespace gtsam; // F @@ -68,17 +65,15 @@ TEST( regularImplicitSchurFactor, addHessianMultiply ) { Matrix3 P = (E.transpose() * E).inverse(); double alpha = 0.5; - VectorValues xvalues = map_list_of // - (0, Vector::Constant(6, 2))// - (1, Vector::Constant(6, 4))// - (2, Vector::Constant(6, 0))// distractor - (3, Vector::Constant(6, 8)); - - VectorValues yExpected = map_list_of// - (0, Vector::Constant(6, 27))// - (1, Vector::Constant(6, -40))// - (2, Vector::Constant(6, 0))// distractor - (3, Vector::Constant(6, 279)); + VectorValues xvalues{{0, Vector::Constant(6, 2)}, // + {1, Vector::Constant(6, 4)}, // + {2, Vector::Constant(6, 0)}, // distractor + {3, Vector::Constant(6, 8)}}; + + VectorValues yExpected{{0, Vector::Constant(6, 27)}, // + {1, Vector::Constant(6, -40)}, // + {2, Vector::Constant(6, 0)}, // distractor + {3, Vector::Constant(6, 279)}}; // Create full F size_t M=4, m = 3, d = 6; diff --git a/gtsam/slam/tests/testRotateFactor.cpp b/gtsam/slam/tests/testRotateFactor.cpp index b8fd01730e..01bfc866b4 100644 --- a/gtsam/slam/tests/testRotateFactor.cpp +++ b/gtsam/slam/tests/testRotateFactor.cpp @@ -12,11 +12,9 @@ #include #include -#include #include using namespace std; -using namespace boost::assign; using namespace std::placeholders; using namespace gtsam; diff --git 
a/gtsam/slam/tests/testSerializationInSlam.cpp b/gtsam/slam/tests/testSerializationInSlam.cpp index 6aec8ecb0d..08fb80c0a9 100644 --- a/gtsam/slam/tests/testSerializationInSlam.cpp +++ b/gtsam/slam/tests/testSerializationInSlam.cpp @@ -25,7 +25,6 @@ #include -#include #include namespace { diff --git a/gtsam/slam/tests/testSmartProjectionFactor.cpp b/gtsam/slam/tests/testSmartProjectionFactor.cpp index ecdb5287fd..564b7f6403 100644 --- a/gtsam/slam/tests/testSmartProjectionFactor.cpp +++ b/gtsam/slam/tests/testSmartProjectionFactor.cpp @@ -23,11 +23,8 @@ #include #include #include -#include #include -using namespace boost::assign; - namespace { static const bool isDebugTest = false; static const Symbol l1('l', 1), l2('l', 2), l3('l', 3); @@ -802,9 +799,9 @@ TEST(SmartProjectionFactor, implicitJacobianFactor ) { Implicit9& implicitSchurFactor = dynamic_cast(*gaussianImplicitSchurFactor); - VectorValues x = map_list_of(c1, - (Vector(9) << 1, 2, 3, 4, 5, 6, 7, 8, 9).finished())(c2, - (Vector(9) << 11, 12, 13, 14, 15, 16, 17, 18, 19).finished()); + VectorValues x{ + {c1, (Vector(9) << 1, 2, 3, 4, 5, 6, 7, 8, 9).finished()}, + {c2, (Vector(9) << 11, 12, 13, 14, 15, 16, 17, 18, 19).finished()}}; VectorValues yExpected, yActual; double alpha = 1.0; diff --git a/gtsam/slam/tests/testSmartProjectionPoseFactor.cpp b/gtsam/slam/tests/testSmartProjectionPoseFactor.cpp index 5c38233c18..ba5a53bbb9 100644 --- a/gtsam/slam/tests/testSmartProjectionPoseFactor.cpp +++ b/gtsam/slam/tests/testSmartProjectionPoseFactor.cpp @@ -25,10 +25,8 @@ #include #include #include -#include #include -using namespace boost::assign; using namespace std::placeholders; namespace { @@ -437,7 +435,7 @@ TEST( SmartProjectionPoseFactor, Factors ) { E(2, 0) = 10; E(2, 2) = 1; E(3, 1) = 10; - SmartFactor::FBlocks Fs = list_of(F1)(F2); + SmartFactor::FBlocks Fs {F1, F2}; Vector b(4); b.setZero(); diff --git a/gtsam/slam/tests/testSmartProjectionRigFactor.cpp b/gtsam/slam/tests/testSmartProjectionRigFactor.cpp 
index b142b20091..6d39eb5b03 100644 --- a/gtsam/slam/tests/testSmartProjectionRigFactor.cpp +++ b/gtsam/slam/tests/testSmartProjectionRigFactor.cpp @@ -25,13 +25,11 @@ #include #include -#include #include #include "smartFactorScenarios.h" #define DISABLE_TIMING -using namespace boost::assign; using namespace std::placeholders; static const double rankTol = 1.0; diff --git a/gtsam/slam/tests/testTriangulationFactor.cpp b/gtsam/slam/tests/testTriangulationFactor.cpp index ad88c88fcc..08d8e66c05 100644 --- a/gtsam/slam/tests/testTriangulationFactor.cpp +++ b/gtsam/slam/tests/testTriangulationFactor.cpp @@ -25,13 +25,10 @@ #include #include -#include -#include #include using namespace std; using namespace gtsam; -using namespace boost::assign; using namespace std::placeholders; // Some common constants diff --git a/gtsam/symbolic/SymbolicBayesNet.h b/gtsam/symbolic/SymbolicBayesNet.h index d9b8ecb576..ef1505108a 100644 --- a/gtsam/symbolic/SymbolicBayesNet.h +++ b/gtsam/symbolic/SymbolicBayesNet.h @@ -60,6 +60,29 @@ namespace gtsam { explicit SymbolicBayesNet(const FactorGraph& graph) : Base(graph) {} + /** + * Constructor that takes an initializer list of shared pointers. + * SymbolicBayesNet bn = {make_shared(), ...}; + */ + SymbolicBayesNet(std::initializer_list> conditionals) + : Base(conditionals) {} + + /// Construct from a single conditional + SymbolicBayesNet(SymbolicConditional&& c) { + push_back(boost::make_shared(c)); + } + + /** + * @brief Add a single conditional and return a reference. 
+ * This allows for chaining, e.g., + * SymbolicBayesNet bn = + * SymbolicBayesNet(SymbolicConditional(...))(SymbolicConditional(...)); + */ + SymbolicBayesNet& operator()(SymbolicConditional&& c) { + push_back(boost::make_shared(c)); + return *this; + } + /// Destructor virtual ~SymbolicBayesNet() {} diff --git a/gtsam/symbolic/SymbolicConditional.cpp b/gtsam/symbolic/SymbolicConditional.cpp index f0d9944b22..fd9cc453d4 100644 --- a/gtsam/symbolic/SymbolicConditional.cpp +++ b/gtsam/symbolic/SymbolicConditional.cpp @@ -15,7 +15,6 @@ * @date Oct 17, 2010 */ -#include #include namespace gtsam { @@ -33,4 +32,15 @@ bool SymbolicConditional::equals(const This& c, double tol) const { return BaseFactor::equals(c) && BaseConditional::equals(c); } +/* ************************************************************************* */ +double SymbolicConditional::logProbability(const HybridValues& c) const { + throw std::runtime_error("SymbolicConditional::logProbability is not implemented"); +} + +/* ************************************************************************* */ +double SymbolicConditional::evaluate(const HybridValues& c) const { + throw std::runtime_error("SymbolicConditional::evaluate is not implemented"); +} + + } // namespace gtsam diff --git a/gtsam/symbolic/SymbolicConditional.h b/gtsam/symbolic/SymbolicConditional.h index 4f49179502..6bd966450e 100644 --- a/gtsam/symbolic/SymbolicConditional.h +++ b/gtsam/symbolic/SymbolicConditional.h @@ -17,10 +17,10 @@ #pragma once -#include -#include #include #include +#include +#include namespace gtsam { @@ -95,13 +95,10 @@ namespace gtsam { return FromIteratorsShared(keys.begin(), keys.end(), nrFrontals); } - ~SymbolicConditional() override {} - /// Copy this object as its actual derived type. 
SymbolicFactor::shared_ptr clone() const { return boost::make_shared(*this); } /// @} - /// @name Testable /// @{ @@ -114,6 +111,19 @@ namespace gtsam { bool equals(const This& c, double tol = 1e-9) const; /// @} + /// @name HybridValues methods. + /// @{ + + /// logProbability throws exception, symbolic. + double logProbability(const HybridValues& x) const override; + + /// evaluate throws exception, symbolic. + double evaluate(const HybridValues& x) const override; + + using Conditional::operator(); // Expose evaluate(const HybridValues&) method.. + using SymbolicFactor::error; // Expose error(const HybridValues&) method.. + + /// @} private: /** Serialization function */ diff --git a/gtsam/symbolic/SymbolicFactor.cpp b/gtsam/symbolic/SymbolicFactor.cpp index 327de0c107..671b71f932 100644 --- a/gtsam/symbolic/SymbolicFactor.cpp +++ b/gtsam/symbolic/SymbolicFactor.cpp @@ -26,6 +26,11 @@ using namespace std; namespace gtsam { + /* ************************************************************************* */ + double SymbolicFactor::error(const HybridValues& c) const { + throw std::runtime_error("SymbolicFactor::error is not implemented"); + } + /* ************************************************************************* */ std::pair, boost::shared_ptr > EliminateSymbolic(const SymbolicFactorGraph& factors, const Ordering& keys) diff --git a/gtsam/symbolic/SymbolicFactor.h b/gtsam/symbolic/SymbolicFactor.h index 767998d22c..12b3374c82 100644 --- a/gtsam/symbolic/SymbolicFactor.h +++ b/gtsam/symbolic/SymbolicFactor.h @@ -22,7 +22,6 @@ #include #include -#include #include #include @@ -31,6 +30,7 @@ namespace gtsam { // Forward declarations class SymbolicConditional; + class HybridValues; class Ordering; /** SymbolicFactor represents a symbolic factor that specifies graph topology but is not @@ -47,7 +47,7 @@ namespace gtsam { /** Overriding the shared_ptr typedef */ typedef boost::shared_ptr shared_ptr; - /// @name Standard Interface + /// @name Standard Constructors 
/// @{ /** Default constructor for I/O */ @@ -55,27 +55,27 @@ namespace gtsam { /** Construct unary factor */ explicit SymbolicFactor(Key j) : - Base(boost::assign::cref_list_of<1>(j)) {} + Base(KeyVector{j}) {} /** Construct binary factor */ SymbolicFactor(Key j1, Key j2) : - Base(boost::assign::cref_list_of<2>(j1)(j2)) {} + Base(KeyVector{j1, j2}) {} /** Construct ternary factor */ SymbolicFactor(Key j1, Key j2, Key j3) : - Base(boost::assign::cref_list_of<3>(j1)(j2)(j3)) {} + Base(KeyVector{j1, j2, j3}) {} /** Construct 4-way factor */ SymbolicFactor(Key j1, Key j2, Key j3, Key j4) : - Base(boost::assign::cref_list_of<4>(j1)(j2)(j3)(j4)) {} + Base(KeyVector{j1, j2, j3, j4}) {} /** Construct 5-way factor */ SymbolicFactor(Key j1, Key j2, Key j3, Key j4, Key j5) : - Base(boost::assign::cref_list_of<5>(j1)(j2)(j3)(j4)(j5)) {} + Base(KeyVector{j1, j2, j3, j4, j5}) {} /** Construct 6-way factor */ SymbolicFactor(Key j1, Key j2, Key j3, Key j4, Key j5, Key j6) : - Base(boost::assign::cref_list_of<6>(j1)(j2)(j3)(j4)(j5)(j6)) {} + Base(KeyVector{j1, j2, j3, j4, j5, j6}) {} /** Create symbolic version of any factor */ explicit SymbolicFactor(const Factor& factor) : Base(factor.keys()) {} @@ -107,10 +107,9 @@ namespace gtsam { } /// @} - /// @name Advanced Constructors /// @{ - public: + /** Constructor from a collection of keys */ template static SymbolicFactor FromIterators(KEYITERATOR beginKey, KEYITERATOR endKey) { @@ -125,15 +124,15 @@ namespace gtsam { return result; } - /** Constructor from a collection of keys - compatible with boost::assign::list_of and - * boost::assign::cref_list_of */ + /** Constructor from a collection of keys - compatible with boost assign::list_of and + * boost assign::cref_list_of */ template static SymbolicFactor FromKeys(const CONTAINER& keys) { return SymbolicFactor(Base::FromKeys(keys)); } - /** Constructor from a collection of keys - compatible with boost::assign::list_of and - * boost::assign::cref_list_of */ + /** Constructor from a 
collection of keys - compatible with boost assign::list_of and + * boost assign::cref_list_of */ template static SymbolicFactor::shared_ptr FromKeysShared(const CONTAINER& keys) { return FromIteratorsShared(keys.begin(), keys.end()); @@ -144,6 +143,9 @@ namespace gtsam { /// @name Standard Interface /// @{ + /// The `error` method throws an exception. + double error(const HybridValues& c) const override; + /** Eliminate the variables in \c keys, in the order specified in \c keys, returning a * conditional and marginal. */ std::pair, boost::shared_ptr > diff --git a/gtsam/symbolic/SymbolicFactorGraph.h b/gtsam/symbolic/SymbolicFactorGraph.h index 36379fd831..1d4292cbfb 100644 --- a/gtsam/symbolic/SymbolicFactorGraph.h +++ b/gtsam/symbolic/SymbolicFactorGraph.h @@ -46,6 +46,12 @@ namespace gtsam { static std::pair, boost::shared_ptr > DefaultEliminate(const FactorGraphType& factors, const Ordering& keys) { return EliminateSymbolic(factors, keys); } + /// The default ordering generation function + static Ordering DefaultOrderingFunc( + const FactorGraphType& graph, + boost::optional variableIndex) { + return Ordering::Colamd(*variableIndex); + } }; /* ************************************************************************* */ @@ -81,6 +87,30 @@ namespace gtsam { template SymbolicFactorGraph(const FactorGraph& graph) : Base(graph) {} + /** + * Constructor that takes an initializer list of shared pointers. + * FactorGraph fg = {make_shared(), ...}; + */ + SymbolicFactorGraph( + std::initializer_list> sharedFactors) + : Base(sharedFactors) {} + + /// Construct from a single factor + SymbolicFactorGraph(SymbolicFactor&& c) { + push_back(boost::make_shared(c)); + } + + /** + * @brief Add a single factor and return a reference. 
+ * This allows for chaining, e.g., + * SymbolicFactorGraph bn = + * SymbolicFactorGraph(SymbolicFactor(...))(SymbolicFactor(...)); + */ + SymbolicFactorGraph& operator()(SymbolicFactor&& c) { + push_back(boost::make_shared(c)); + return *this; + } + /// Destructor virtual ~SymbolicFactorGraph() {} diff --git a/gtsam/symbolic/tests/symbolicExampleGraphs.h b/gtsam/symbolic/tests/symbolicExampleGraphs.h index b70595ac91..716f502a4b 100644 --- a/gtsam/symbolic/tests/symbolicExampleGraphs.h +++ b/gtsam/symbolic/tests/symbolicExampleGraphs.h @@ -1,6 +1,6 @@ /* ---------------------------------------------------------------------------- - * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * GTSAM Copyright 2010-2023, Georgia Tech Research Corporation, * Atlanta, Georgia 30332-0415 * All Rights Reserved * Authors: Frank Dellaert, et al. (see THANKS for the full author list) @@ -10,7 +10,7 @@ * -------------------------------------------------------------------------- */ /* - * @file symbolicExampleGraphs.cpp + * @file symbolicExampleGraphs.h * @date sept 15, 2012 * @author Frank Dellaert * @author Michael Kaess @@ -20,58 +20,74 @@ #pragma once +#include #include #include #include #include #include #include -#include namespace gtsam { namespace { - const SymbolicFactorGraph simpleTestGraph1 = boost::assign::list_of - (boost::make_shared(0,1)) - (boost::make_shared(0,2)) - (boost::make_shared(1,4)) - (boost::make_shared(2,4)) - (boost::make_shared(3,4)); - - const SymbolicBayesNet simpleTestGraph1BayesNet = boost::assign::list_of - (boost::make_shared(0,1,2)) - (boost::make_shared(1,2,4)) - (boost::make_shared(2,4)) - (boost::make_shared(3,4)) - (boost::make_shared(4)); - - const SymbolicFactorGraph simpleTestGraph2 = boost::assign::list_of - (boost::make_shared(0,1)) - (boost::make_shared(0,2)) - (boost::make_shared(1,3)) - (boost::make_shared(1,4)) - (boost::make_shared(2,3)) - (boost::make_shared(4,5)); + // A small helper class to replace Boost's `list_of` 
function. + template + struct ChainedVector { + using Result = std::vector::type>; + Result result; + + ChainedVector(const T& c) { result.push_back(c); } + + ChainedVector& operator()(const T& c) { + result.push_back(c); + return *this; + } + + operator Result() { return result; } + }; + + const SymbolicFactorGraph simpleTestGraph1 { + boost::make_shared(0,1), + boost::make_shared(0,2), + boost::make_shared(1,4), + boost::make_shared(2,4), + boost::make_shared(3,4)}; + + const SymbolicBayesNet simpleTestGraph1BayesNet { + boost::make_shared(0,1,2), + boost::make_shared(1,2,4), + boost::make_shared(2,4), + boost::make_shared(3,4), + boost::make_shared(4)}; + + const SymbolicFactorGraph simpleTestGraph2 { + boost::make_shared(0,1), + boost::make_shared(0,2), + boost::make_shared(1,3), + boost::make_shared(1,4), + boost::make_shared(2,3), + boost::make_shared(4,5)}; /** 1 - 0 - 2 - 3 */ - const SymbolicFactorGraph simpleChain = boost::assign::list_of - (boost::make_shared(1,0)) - (boost::make_shared(0,2)) - (boost::make_shared(2,3)); + const SymbolicFactorGraph simpleChain { + boost::make_shared(1,0), + boost::make_shared(0,2), + boost::make_shared(2,3)}; /* ************************************************************************* * - * 2 3 - * 0 1 : 2 - ****************************************************************************/ + * 2 3 + * 0 1 : 2 + ****************************************************************************/ SymbolicBayesTree __simpleChainBayesTree() { SymbolicBayesTree result; result.insertRoot(boost::make_shared( - boost::make_shared( - SymbolicConditional::FromKeys(boost::assign::list_of(2)(3), 2)))); + boost::make_shared( + SymbolicConditional::FromKeys(KeyVector{2,3}, 2)))); result.addClique(boost::make_shared( - boost::make_shared( - SymbolicConditional::FromKeys(boost::assign::list_of(0)(1)(2), 2))), - result.roots().front()); + boost::make_shared( + SymbolicConditional::FromKeys(KeyVector{0,1,2}, 2))), + result.roots().front()); return 
result; } @@ -79,52 +95,67 @@ namespace gtsam { /* ************************************************************************* */ // Keys for ASIA example from the tutorial with A and D evidence - const Key _X_=gtsam::symbol_shorthand::X(0), _T_=gtsam::symbol_shorthand::T(0), - _S_=gtsam::symbol_shorthand::S(0), _E_=gtsam::symbol_shorthand::E(0), - _L_=gtsam::symbol_shorthand::L(0), _B_=gtsam::symbol_shorthand::B(0); + const Key _X_ = symbol_shorthand::X(0), + _T_ = symbol_shorthand::T(0), + _S_ = symbol_shorthand::S(0), + _E_ = symbol_shorthand::E(0), + _L_ = symbol_shorthand::L(0), + _B_ = symbol_shorthand::B(0); // Factor graph for Asia example - const SymbolicFactorGraph asiaGraph = boost::assign::list_of - (boost::make_shared(_T_)) - (boost::make_shared(_S_)) - (boost::make_shared(_T_, _E_, _L_)) - (boost::make_shared(_L_, _S_)) - (boost::make_shared(_S_, _B_)) - (boost::make_shared(_E_, _B_)) - (boost::make_shared(_E_, _X_)); - - const SymbolicBayesNet asiaBayesNet = boost::assign::list_of - (boost::make_shared(_T_, _E_, _L_)) - (boost::make_shared(_X_, _E_)) - (boost::make_shared(_E_, _B_, _L_)) - (boost::make_shared(_S_, _B_, _L_)) - (boost::make_shared(_L_, _B_)) - (boost::make_shared(_B_)); + const SymbolicFactorGraph asiaGraph = { + boost::make_shared(_T_), + boost::make_shared(_S_), + boost::make_shared(_T_, _E_, _L_), + boost::make_shared(_L_, _S_), + boost::make_shared(_S_, _B_), + boost::make_shared(_E_, _B_), + boost::make_shared(_E_, _X_)}; + + const SymbolicBayesNet asiaBayesNet = { + boost::make_shared(_T_, _E_, _L_), + boost::make_shared(_X_, _E_), + boost::make_shared(_E_, _B_, _L_), + boost::make_shared(_S_, _B_, _L_), + boost::make_shared(_L_, _B_), + boost::make_shared(_B_)}; + /* ************************************************************************* */ + // Allow creating Cliques and Keys in `list_of` chaining style: + using sharedClique = SymbolicBayesTreeClique::shared_ptr; + using Children = ChainedVector; + using Keys = ChainedVector; 
+ + inline sharedClique LeafClique(const Keys::Result& keys, + DenseIndex nrFrontals) { + return boost::make_shared( + boost::make_shared( + SymbolicConditional::FromKeys(keys, nrFrontals))); + } + + inline sharedClique NodeClique(const Keys::Result& keys, + DenseIndex nrFrontals, + const Children::Result& children) { + sharedClique clique = LeafClique(keys, nrFrontals); + clique->children.assign(children.begin(), children.end()); + for (auto&& child : children) child->parent_ = clique; + return clique; + } + + /* ************************************************************************* */ + // BayesTree for Asia example SymbolicBayesTree __asiaBayesTree() { SymbolicBayesTree result; - result.insertRoot(boost::make_shared( - boost::make_shared( - SymbolicConditional::FromKeys(boost::assign::list_of(_E_)(_L_)(_B_), 3)))); - result.addClique(boost::make_shared( - boost::make_shared( - SymbolicConditional::FromKeys(boost::assign::list_of(_S_)(_B_) (_L_), 1))), - result.roots().front()); - result.addClique(boost::make_shared( - boost::make_shared( - SymbolicConditional::FromKeys(boost::assign::list_of(_T_)(_E_)(_L_), 1))), - result.roots().front()); - result.addClique(boost::make_shared( - boost::make_shared( - SymbolicConditional::FromKeys(boost::assign::list_of(_X_)(_E_), 1))), - result.roots().front()); + result.insertRoot(LeafClique({_E_, _L_, _B_}, 3)); + result.addClique(LeafClique({_S_, _B_, _L_}, 1), result.roots().front()); + result.addClique(LeafClique({_T_, _E_, _L_}, 1), result.roots().front()); + result.addClique(LeafClique({_X_, _E_}, 1), result.roots().front()); return result; } const SymbolicBayesTree asiaBayesTree = __asiaBayesTree(); /* ************************************************************************* */ - const Ordering asiaOrdering = boost::assign::list_of(_X_)(_T_)(_S_)(_E_)(_L_)(_B_); - - } -} + const Ordering asiaOrdering{_X_, _T_, _S_, _E_, _L_, _B_}; + } // namespace +} // namespace gtsam diff --git 
a/gtsam/symbolic/tests/testSymbolicBayesTree.cpp b/gtsam/symbolic/tests/testSymbolicBayesTree.cpp index ee9b41a5aa..0095c9293f 100644 --- a/gtsam/symbolic/tests/testSymbolicBayesTree.cpp +++ b/gtsam/symbolic/tests/testSymbolicBayesTree.cpp @@ -17,16 +17,12 @@ * @author Viorela Ila */ -#include -#include #include +#include +#include #include -#include -#include -#include #include -using namespace boost::assign; using boost::adaptors::indirected; #include @@ -38,37 +34,8 @@ using namespace gtsam::symbol_shorthand; static bool debug = false; -namespace { - - /* ************************************************************************* */ - // Helper functions for below - template - SymbolicBayesTreeClique::shared_ptr MakeClique(const KEYS& keys, DenseIndex nrFrontals) - { - return boost::make_shared( - boost::make_shared( - SymbolicConditional::FromKeys(keys, nrFrontals))); - } - - template - SymbolicBayesTreeClique::shared_ptr MakeClique( - const KEYS& keys, DenseIndex nrFrontals, const CHILDREN& children) - { - SymbolicBayesTreeClique::shared_ptr clique = - boost::make_shared( - boost::make_shared( - SymbolicConditional::FromKeys(keys, nrFrontals))); - clique->children.assign(children.begin(), children.end()); - for(typename CHILDREN::const_iterator child = children.begin(); child != children.end(); ++child) - (*child)->parent_ = clique; - return clique; - } - -} - /* ************************************************************************* */ -TEST(SymbolicBayesTree, clear) -{ +TEST(SymbolicBayesTree, clear) { SymbolicBayesTree bayesTree = asiaBayesTree; bayesTree.clear(); @@ -79,34 +46,35 @@ TEST(SymbolicBayesTree, clear) } /* ************************************************************************* */ -TEST(SymbolicBayesTree, clique_structure) -{ +TEST(SymbolicBayesTree, clique_structure) { // l1 l2 // / | / | // x1 --- x2 --- x3 --- x4 --- x5 // \ | // l3 SymbolicFactorGraph graph; - graph += SymbolicFactor(X(1), L(1)); - graph += SymbolicFactor(X(1), X(2)); - 
graph += SymbolicFactor(X(2), L(1)); - graph += SymbolicFactor(X(2), X(3)); - graph += SymbolicFactor(X(3), X(4)); - graph += SymbolicFactor(X(4), L(2)); - graph += SymbolicFactor(X(4), X(5)); - graph += SymbolicFactor(L(2), X(5)); - graph += SymbolicFactor(X(4), L(3)); - graph += SymbolicFactor(X(5), L(3)); + graph.emplace_shared(X(1), L(1)); + graph.emplace_shared(X(1), X(2)); + graph.emplace_shared(X(2), L(1)); + graph.emplace_shared(X(2), X(3)); + graph.emplace_shared(X(3), X(4)); + graph.emplace_shared(X(4), L(2)); + graph.emplace_shared(X(4), X(5)); + graph.emplace_shared(L(2), X(5)); + graph.emplace_shared(X(4), L(3)); + graph.emplace_shared(X(5), L(3)); SymbolicBayesTree expected; expected.insertRoot( - MakeClique(list_of(X(2)) (X(3)), 2, list_of - (MakeClique(list_of(X(4)) (X(3)), 1, list_of - (MakeClique(list_of(X(5)) (L(2)) (X(4)), 2, list_of - (MakeClique(list_of(L(3)) (X(4)) (X(5)), 1)))))) - (MakeClique(list_of(X(1)) (L(1)) (X(2)), 2)))); + NodeClique(Keys(X(2))(X(3)), 2, + Children(NodeClique( + Keys(X(4))(X(3)), 1, + Children(NodeClique( + Keys(X(5))(L(2))(X(4)), 2, + Children(LeafClique(Keys(L(3))(X(4))(X(5)), 1))))))( + LeafClique(Keys(X(1))(L(1))(X(2)), 2)))); - Ordering order = list_of(X(1)) (L(3)) (L(1)) (X(5)) (X(2)) (L(2)) (X(4)) (X(3)); + Ordering order{X(1), L(3), L(1), X(5), X(2), L(2), X(4), X(3)}; SymbolicBayesTree actual = *graph.eliminateMultifrontal(order); @@ -120,56 +88,51 @@ Bayes Tree for testing conversion to a forest of orphans needed for incremental. 
D|C F|E */ /* ************************************************************************* */ -TEST( BayesTree, removePath ) -{ - const Key _A_=A(0), _B_=B(0), _C_=C(0), _D_=D(0), _E_=E(0), _F_=F(0); +TEST(BayesTree, removePath) { + const Key _A_ = A(0), _B_ = B(0), _C_ = C(0), _D_ = D(0), _E_ = E(0), + _F_ = F(0); SymbolicBayesTree bayesTreeOrig; - bayesTreeOrig.insertRoot( - MakeClique(list_of(_A_)(_B_), 2, list_of - (MakeClique(list_of(_C_)(_A_), 1, list_of - (MakeClique(list_of(_D_)(_C_), 1)))) - (MakeClique(list_of(_E_)(_B_), 1, list_of - (MakeClique(list_of(_F_)(_E_), 1)))))); + auto left = NodeClique(Keys(_C_)(_A_), 1, {LeafClique(Keys(_D_)(_C_), 1)}); + auto right = NodeClique(Keys(_E_)(_B_), 1, {LeafClique(Keys(_F_)(_E_), 1)}); + bayesTreeOrig.insertRoot(NodeClique(Keys(_A_)(_B_), 2, {left, right})); SymbolicBayesTree bayesTree = bayesTreeOrig; // remove C, expected outcome: factor graph with ABC, // Bayes Tree now contains two orphan trees: D|C and E|B,F|E SymbolicFactorGraph expected; - expected += SymbolicFactor(_A_,_B_); - expected += SymbolicFactor(_C_,_A_); - SymbolicBayesTree::Cliques expectedOrphans; - expectedOrphans += bayesTree[_D_], bayesTree[_E_]; + expected.emplace_shared(_A_, _B_); + expected.emplace_shared(_C_, _A_); + SymbolicBayesTree::Cliques expectedOrphans{bayesTree[_D_], bayesTree[_E_]}; SymbolicBayesNet bn; SymbolicBayesTree::Cliques orphans; bayesTree.removePath(bayesTree[_C_], &bn, &orphans); SymbolicFactorGraph factors(bn); CHECK(assert_equal(expected, factors)); - CHECK(assert_container_equal(expectedOrphans|indirected, orphans|indirected)); + CHECK(assert_container_equal(expectedOrphans | indirected, + orphans | indirected)); bayesTree = bayesTreeOrig; // remove E: factor graph with EB; E|B removed from second orphan tree SymbolicFactorGraph expected2; - expected2 += SymbolicFactor(_A_,_B_); - expected2 += SymbolicFactor(_E_,_B_); - SymbolicBayesTree::Cliques expectedOrphans2; - expectedOrphans2 += bayesTree[_F_]; - 
expectedOrphans2 += bayesTree[_C_]; + expected2.emplace_shared(_A_, _B_); + expected2.emplace_shared(_E_, _B_); + SymbolicBayesTree::Cliques expectedOrphans2{bayesTree[_F_], bayesTree[_C_]}; SymbolicBayesNet bn2; SymbolicBayesTree::Cliques orphans2; bayesTree.removePath(bayesTree[_E_], &bn2, &orphans2); SymbolicFactorGraph factors2(bn2); CHECK(assert_equal(expected2, factors2)); - CHECK(assert_container_equal(expectedOrphans2|indirected, orphans2|indirected)); + CHECK(assert_container_equal(expectedOrphans2 | indirected, + orphans2 | indirected)); } /* ************************************************************************* */ -TEST( BayesTree, removePath2 ) -{ +TEST(BayesTree, removePath2) { SymbolicBayesTree bayesTree = asiaBayesTree; // Call remove-path with clique B @@ -180,16 +143,16 @@ TEST( BayesTree, removePath2 ) // Check expected outcome SymbolicFactorGraph expected; - expected += SymbolicFactor(_E_,_L_,_B_); + expected.emplace_shared(_E_, _L_, _B_); CHECK(assert_equal(expected, factors)); - SymbolicBayesTree::Cliques expectedOrphans; - expectedOrphans += bayesTree[_S_], bayesTree[_T_], bayesTree[_X_]; - CHECK(assert_container_equal(expectedOrphans|indirected, orphans|indirected)); + SymbolicBayesTree::Cliques expectedOrphans{bayesTree[_S_], bayesTree[_T_], + bayesTree[_X_]}; + CHECK(assert_container_equal(expectedOrphans | indirected, + orphans | indirected)); } /* ************************************************************************* */ -TEST(BayesTree, removePath3) -{ +TEST(BayesTree, removePath3) { SymbolicBayesTree bayesTree = asiaBayesTree; // Call remove-path with clique T @@ -200,199 +163,183 @@ TEST(BayesTree, removePath3) // Check expected outcome SymbolicFactorGraph expected; - expected += SymbolicFactor(_E_, _L_, _B_); - expected += SymbolicFactor(_T_, _E_, _L_); + expected.emplace_shared(_E_, _L_, _B_); + expected.emplace_shared(_T_, _E_, _L_); CHECK(assert_equal(expected, factors)); - SymbolicBayesTree::Cliques expectedOrphans; - 
expectedOrphans += bayesTree[_S_], bayesTree[_X_]; - CHECK(assert_container_equal(expectedOrphans|indirected, orphans|indirected)); + SymbolicBayesTree::Cliques expectedOrphans{bayesTree[_S_], bayesTree[_X_]}; + CHECK(assert_container_equal(expectedOrphans | indirected, + orphans | indirected)); } -void getAllCliques(const SymbolicBayesTree::sharedClique& subtree, SymbolicBayesTree::Cliques& cliques) { +void getAllCliques(const SymbolicBayesTree::sharedClique& subtree, + SymbolicBayesTree::Cliques& cliques) { // Check if subtree exists if (subtree) { cliques.push_back(subtree); // Recursive call over all child cliques - for(SymbolicBayesTree::sharedClique& childClique: subtree->children) { - getAllCliques(childClique,cliques); + for (SymbolicBayesTree::sharedClique& childClique : subtree->children) { + getAllCliques(childClique, cliques); } } } /* ************************************************************************* */ -TEST( BayesTree, shortcutCheck ) -{ - const Key _A_=6, _B_=5, _C_=4, _D_=3, _E_=2, _F_=1, _G_=0; - SymbolicFactorGraph chain = list_of - (SymbolicFactor(_A_)) - (SymbolicFactor(_B_, _A_)) - (SymbolicFactor(_C_, _A_)) - (SymbolicFactor(_D_, _C_)) - (SymbolicFactor(_E_, _B_)) - (SymbolicFactor(_F_, _E_)) - (SymbolicFactor(_G_, _F_)); - Ordering ordering(list_of(_G_)(_F_)(_E_)(_D_)(_C_)(_B_)(_A_)); +TEST(BayesTree, shortcutCheck) { + const Key _A_ = 6, _B_ = 5, _C_ = 4, _D_ = 3, _E_ = 2, _F_ = 1, _G_ = 0; + auto chain = SymbolicFactorGraph(SymbolicFactor(_A_)) // + (SymbolicFactor(_B_, _A_)) // + (SymbolicFactor(_C_, _A_)) // + (SymbolicFactor(_D_, _C_)) // + (SymbolicFactor(_E_, _B_)) // + (SymbolicFactor(_F_, _E_)) // + (SymbolicFactor(_G_, _F_)); + Ordering ordering{_G_, _F_, _E_, _D_, _C_, _B_, _A_}; SymbolicBayesTree bayesTree = *chain.eliminateMultifrontal(ordering); - //bayesTree.saveGraph("BT1.dot"); + // bayesTree.saveGraph("BT1.dot"); SymbolicBayesTree::sharedClique rootClique = bayesTree.roots().front(); - //rootClique->printTree(); + // 
rootClique->printTree(); SymbolicBayesTree::Cliques allCliques; - getAllCliques(rootClique,allCliques); + getAllCliques(rootClique, allCliques); - for(SymbolicBayesTree::sharedClique& clique: allCliques) { - //clique->print("Clique#"); + for (SymbolicBayesTree::sharedClique& clique : allCliques) { + // clique->print("Clique#"); SymbolicBayesNet bn = clique->shortcut(rootClique); - //bn.print("Shortcut:\n"); - //cout << endl; + // bn.print("Shortcut:\n"); + // cout << endl; } // Check if all the cached shortcuts are cleared rootClique->deleteCachedShortcuts(); - for(SymbolicBayesTree::sharedClique& clique: allCliques) { + for (SymbolicBayesTree::sharedClique& clique : allCliques) { bool notCleared = clique->cachedSeparatorMarginal().is_initialized(); - CHECK( notCleared == false); + CHECK(notCleared == false); } EXPECT_LONGS_EQUAL(0, (long)rootClique->numCachedSeparatorMarginals()); -// for(SymbolicBayesTree::sharedClique& clique: allCliques) { -// clique->print("Clique#"); -// if(clique->cachedShortcut()){ -// bn = clique->cachedShortcut().get(); -// bn.print("Shortcut:\n"); -// } -// else -// cout << "Not Initialized" << endl; -// cout << endl; -// } + // for(SymbolicBayesTree::sharedClique& clique: allCliques) { + // clique->print("Clique#"); + // if(clique->cachedShortcut()){ + // bn = clique->cachedShortcut().get(); + // bn.print("Shortcut:\n"); + // } + // else + // cout << "Not Initialized" << endl; + // cout << endl; + // } } /* ************************************************************************* */ -TEST( BayesTree, removeTop ) -{ +TEST(BayesTree, removeTop) { SymbolicBayesTree bayesTree = asiaBayesTree; // create a new factor to be inserted - //boost::shared_ptr newFactor(new IndexFactor(_S_,_B_)); + // boost::shared_ptr newFactor(new IndexFactor(_S_,_B_)); // Remove the contaminated part of the Bayes tree SymbolicBayesNet bn; SymbolicBayesTree::Cliques orphans; - bayesTree.removeTop(list_of(_B_)(_S_), &bn, &orphans); + 
bayesTree.removeTop(Keys(_B_)(_S_), &bn, &orphans); // Check expected outcome SymbolicBayesNet expected; - expected += SymbolicConditional::FromKeys(list_of(_E_)(_L_)(_B_), 3); - expected += SymbolicConditional::FromKeys(list_of(_S_)(_B_)(_L_), 1); + expected += SymbolicConditional::FromKeys(Keys(_E_)(_L_)(_B_), 3); + expected += SymbolicConditional::FromKeys(Keys(_S_)(_B_)(_L_), 1); CHECK(assert_equal(expected, bn)); - SymbolicBayesTree::Cliques expectedOrphans; - expectedOrphans += bayesTree[_T_], bayesTree[_X_]; - CHECK(assert_container_equal(expectedOrphans|indirected, orphans|indirected)); + SymbolicBayesTree::Cliques expectedOrphans{bayesTree[_T_], bayesTree[_X_]}; + CHECK(assert_container_equal(expectedOrphans | indirected, + orphans | indirected)); // Try removeTop again with a factor that should not change a thing - //boost::shared_ptr newFactor2(new IndexFactor(_B_)); + // boost::shared_ptr newFactor2(new IndexFactor(_B_)); SymbolicBayesNet bn2; SymbolicBayesTree::Cliques orphans2; - bayesTree.removeTop(list_of(_B_), &bn2, &orphans2); + bayesTree.removeTop(Keys(_B_), &bn2, &orphans2); SymbolicFactorGraph factors2(bn2); SymbolicFactorGraph expected2; CHECK(assert_equal(expected2, factors2)); SymbolicBayesTree::Cliques expectedOrphans2; - CHECK(assert_container_equal(expectedOrphans2|indirected, orphans2|indirected)); + CHECK(assert_container_equal(expectedOrphans2 | indirected, + orphans2 | indirected)); } /* ************************************************************************* */ -TEST( BayesTree, removeTop2 ) -{ +TEST(BayesTree, removeTop2) { SymbolicBayesTree bayesTree = asiaBayesTree; // create two factors to be inserted - //SymbolicFactorGraph newFactors; - //newFactors.push_factor(_B_); - //newFactors.push_factor(_S_); + // SymbolicFactorGraph newFactors; + // newFactors.push_factor(_B_); + // newFactors.push_factor(_S_); // Remove the contaminated part of the Bayes tree SymbolicBayesNet bn; SymbolicBayesTree::Cliques orphans; - 
bayesTree.removeTop(list_of(_T_), &bn, &orphans); + bayesTree.removeTop(Keys(_T_), &bn, &orphans); // Check expected outcome - SymbolicBayesNet expected = list_of - (SymbolicConditional::FromKeys(list_of(_E_)(_L_)(_B_), 3)) - (SymbolicConditional::FromKeys(list_of(_T_)(_E_)(_L_), 1)); + auto expected = SymbolicBayesNet( + SymbolicConditional::FromKeys(Keys(_E_)(_L_)(_B_), 3))( + SymbolicConditional::FromKeys(Keys(_T_)(_E_)(_L_), 1)); CHECK(assert_equal(expected, bn)); - SymbolicBayesTree::Cliques expectedOrphans; - expectedOrphans += bayesTree[_S_], bayesTree[_X_]; - CHECK(assert_container_equal(expectedOrphans|indirected, orphans|indirected)); + SymbolicBayesTree::Cliques expectedOrphans{bayesTree[_S_], bayesTree[_X_]}; + CHECK(assert_container_equal(expectedOrphans | indirected, + orphans | indirected)); } /* ************************************************************************* */ -TEST( BayesTree, removeTop3 ) -{ - SymbolicFactorGraph graph = list_of - (SymbolicFactor(L(5))) - (SymbolicFactor(X(4), L(5))) - (SymbolicFactor(X(2), X(4))) - (SymbolicFactor(X(3), X(2))); - Ordering ordering(list_of (X(3)) (X(2)) (X(4)) (L(5)) ); +TEST(BayesTree, removeTop3) { + auto graph = SymbolicFactorGraph(SymbolicFactor(L(5)))(SymbolicFactor( + X(4), L(5)))(SymbolicFactor(X(2), X(4)))(SymbolicFactor(X(3), X(2))); + Ordering ordering{X(3), X(2), X(4), L(5)}; SymbolicBayesTree bayesTree = *graph.eliminateMultifrontal(ordering); // remove all SymbolicBayesNet bn; SymbolicBayesTree::Cliques orphans; - bayesTree.removeTop(list_of(L(5))(X(4))(X(2))(X(3)), &bn, &orphans); + bayesTree.removeTop(Keys(L(5))(X(4))(X(2))(X(3)), &bn, &orphans); - SymbolicBayesNet expectedBn = list_of - (SymbolicConditional::FromKeys(list_of(X(4))(L(5)), 2)) - (SymbolicConditional(X(2), X(4))) - (SymbolicConditional(X(3), X(2))); + auto expectedBn = SymbolicBayesNet( + SymbolicConditional::FromKeys(Keys(X(4))(L(5)), 2))( + SymbolicConditional(X(2), X(4)))(SymbolicConditional(X(3), X(2))); 
EXPECT(assert_equal(expectedBn, bn)); EXPECT(orphans.empty()); } /* ************************************************************************* */ -TEST( BayesTree, removeTop4 ) -{ - SymbolicFactorGraph graph = list_of - (SymbolicFactor(L(5))) - (SymbolicFactor(X(4), L(5))) - (SymbolicFactor(X(2), X(4))) - (SymbolicFactor(X(3), X(2))); - Ordering ordering(list_of (X(3)) (X(2)) (X(4)) (L(5)) ); +TEST(BayesTree, removeTop4) { + auto graph = SymbolicFactorGraph(SymbolicFactor(L(5)))(SymbolicFactor( + X(4), L(5)))(SymbolicFactor(X(2), X(4)))(SymbolicFactor(X(3), X(2))); + Ordering ordering{X(3), X(2), X(4), L(5)}; SymbolicBayesTree bayesTree = *graph.eliminateMultifrontal(ordering); // remove all SymbolicBayesNet bn; SymbolicBayesTree::Cliques orphans; - bayesTree.removeTop(list_of(X(2))(L(5))(X(4))(X(3)), &bn, &orphans); + bayesTree.removeTop(Keys(X(2))(L(5))(X(4))(X(3)), &bn, &orphans); - SymbolicBayesNet expectedBn = list_of - (SymbolicConditional::FromKeys(list_of(X(4))(L(5)), 2)) - (SymbolicConditional(X(2), X(4))) - (SymbolicConditional(X(3), X(2))); + auto expectedBn = SymbolicBayesNet( + SymbolicConditional::FromKeys(Keys(X(4))(L(5)), 2))( + SymbolicConditional(X(2), X(4)))(SymbolicConditional(X(3), X(2))); EXPECT(assert_equal(expectedBn, bn)); EXPECT(orphans.empty()); } /* ************************************************************************* */ -TEST( BayesTree, removeTop5 ) -{ +TEST(BayesTree, removeTop5) { // Remove top called with variables that are not in the Bayes tree - SymbolicFactorGraph graph = list_of - (SymbolicFactor(L(5))) - (SymbolicFactor(X(4), L(5))) - (SymbolicFactor(X(2), X(4))) - (SymbolicFactor(X(3), X(2))); - Ordering ordering(list_of (X(3)) (X(2)) (X(4)) (L(5)) ); + auto graph = SymbolicFactorGraph(SymbolicFactor(L(5)))(SymbolicFactor( + X(4), L(5)))(SymbolicFactor(X(2), X(4)))(SymbolicFactor(X(3), X(2))); + Ordering ordering{X(3), X(2), X(4), L(5)}; SymbolicBayesTree bayesTree = *graph.eliminateMultifrontal(ordering); // Remove 
nonexistant SymbolicBayesNet bn; SymbolicBayesTree::Cliques orphans; - bayesTree.removeTop(list_of(X(10)), &bn, &orphans); + bayesTree.removeTop(Keys(X(10)), &bn, &orphans); SymbolicBayesNet expectedBn; EXPECT(assert_equal(expectedBn, bn)); @@ -400,29 +347,28 @@ TEST( BayesTree, removeTop5 ) } /* ************************************************************************* */ -TEST( SymbolicBayesTree, thinTree ) { - - // create a thin-tree Bayesnet, a la Jean-Guillaume +TEST(SymbolicBayesTree, thinTree) { + // create a thin-tree Bayes net, a la Jean-Guillaume SymbolicBayesNet bayesNet; - bayesNet.push_back(boost::make_shared(14)); + bayesNet.emplace_shared(14); - bayesNet.push_back(boost::make_shared(13, 14)); - bayesNet.push_back(boost::make_shared(12, 14)); + bayesNet.emplace_shared(13, 14); + bayesNet.emplace_shared(12, 14); - bayesNet.push_back(boost::make_shared(11, 13, 14)); - bayesNet.push_back(boost::make_shared(10, 13, 14)); - bayesNet.push_back(boost::make_shared(9, 12, 14)); - bayesNet.push_back(boost::make_shared(8, 12, 14)); + bayesNet.emplace_shared(11, 13, 14); + bayesNet.emplace_shared(10, 13, 14); + bayesNet.emplace_shared(9, 12, 14); + bayesNet.emplace_shared(8, 12, 14); - bayesNet.push_back(boost::make_shared(7, 11, 13)); - bayesNet.push_back(boost::make_shared(6, 11, 13)); - bayesNet.push_back(boost::make_shared(5, 10, 13)); - bayesNet.push_back(boost::make_shared(4, 10, 13)); + bayesNet.emplace_shared(7, 11, 13); + bayesNet.emplace_shared(6, 11, 13); + bayesNet.emplace_shared(5, 10, 13); + bayesNet.emplace_shared(4, 10, 13); - bayesNet.push_back(boost::make_shared(3, 9, 12)); - bayesNet.push_back(boost::make_shared(2, 9, 12)); - bayesNet.push_back(boost::make_shared(1, 8, 12)); - bayesNet.push_back(boost::make_shared(0, 8, 12)); + bayesNet.emplace_shared(3, 9, 12); + bayesNet.emplace_shared(2, 9, 12); + bayesNet.emplace_shared(1, 8, 12); + bayesNet.emplace_shared(0, 8, 12); if (debug) { GTSAM_PRINT(bayesNet); @@ -430,7 +376,8 @@ TEST( 
SymbolicBayesTree, thinTree ) { } // create a BayesTree out of a Bayes net - SymbolicBayesTree bayesTree = *SymbolicFactorGraph(bayesNet).eliminateMultifrontal(); + SymbolicBayesTree bayesTree = + *SymbolicFactorGraph(bayesNet).eliminateMultifrontal(); if (debug) { GTSAM_PRINT(bayesTree); bayesTree.saveGraph("/tmp/SymbolicBayesTree.dot"); @@ -442,7 +389,7 @@ TEST( SymbolicBayesTree, thinTree ) { // check shortcut P(S9||R) to root SymbolicBayesTree::Clique::shared_ptr c = bayesTree[9]; SymbolicBayesNet shortcut = c->shortcut(R); - SymbolicBayesNet expected = list_of(SymbolicConditional(14, 11, 13)); + auto expected = SymbolicBayesNet(SymbolicConditional(14, 11, 13)); EXPECT(assert_equal(expected, shortcut)); } @@ -450,9 +397,8 @@ TEST( SymbolicBayesTree, thinTree ) { // check shortcut P(S8||R) to root SymbolicBayesTree::Clique::shared_ptr c = bayesTree[8]; SymbolicBayesNet shortcut = c->shortcut(R); - SymbolicBayesNet expected = list_of - (SymbolicConditional(12, 14)) - (SymbolicConditional(14, 11, 13)); + auto expected = SymbolicBayesNet(SymbolicConditional(12, 14))( + SymbolicConditional(14, 11, 13)); EXPECT(assert_equal(expected, shortcut)); } @@ -460,7 +406,7 @@ TEST( SymbolicBayesTree, thinTree ) { // check shortcut P(S4||R) to root SymbolicBayesTree::Clique::shared_ptr c = bayesTree[4]; SymbolicBayesNet shortcut = c->shortcut(R); - SymbolicBayesNet expected = list_of(SymbolicConditional(10, 11, 13)); + auto expected = SymbolicBayesNet(SymbolicConditional(10, 11, 13)); EXPECT(assert_equal(expected, shortcut)); } @@ -468,8 +414,8 @@ TEST( SymbolicBayesTree, thinTree ) { // check shortcut P(S2||R) to root SymbolicBayesTree::Clique::shared_ptr c = bayesTree[2]; SymbolicBayesNet shortcut = c->shortcut(R); - SymbolicBayesNet expected = list_of(SymbolicConditional(9, 11, 12, 13)) - (SymbolicConditional(12, 11, 13)); + auto expected = SymbolicBayesNet(SymbolicConditional(9, 11, 12, 13))( + SymbolicConditional(12, 11, 13)); EXPECT(assert_equal(expected, shortcut)); } 
@@ -477,28 +423,28 @@ TEST( SymbolicBayesTree, thinTree ) { // check shortcut P(S0||R) to root SymbolicBayesTree::Clique::shared_ptr c = bayesTree[0]; SymbolicBayesNet shortcut = c->shortcut(R); - SymbolicBayesNet expected = list_of(SymbolicConditional(8, 11, 12, 13)) - (SymbolicConditional(12, 11, 13)); + auto expected = SymbolicBayesNet(SymbolicConditional(8, 11, 12, 13))( + SymbolicConditional(12, 11, 13)); EXPECT(assert_equal(expected, shortcut)); } SymbolicBayesNet::shared_ptr actualJoint; // Check joint P(8,2) - if (false) { // TODO, not disjoint + if (false) { // TODO, not disjoint actualJoint = bayesTree.jointBayesNet(8, 2); SymbolicBayesNet expected; - expected.push_back(boost::make_shared(8)); - expected.push_back(boost::make_shared(2, 8)); + expected.emplace_shared(8); + expected.emplace_shared(2, 8); EXPECT(assert_equal(expected, *actualJoint)); } // Check joint P(1,2) - if (false) { // TODO, not disjoint + if (false) { // TODO, not disjoint actualJoint = bayesTree.jointBayesNet(1, 2); SymbolicBayesNet expected; - expected.push_back(boost::make_shared(2)); - expected.push_back(boost::make_shared(1, 2)); + expected.emplace_shared(2); + expected.emplace_shared(1, 2); EXPECT(assert_equal(expected, *actualJoint)); } @@ -506,35 +452,33 @@ TEST( SymbolicBayesTree, thinTree ) { if (true) { actualJoint = bayesTree.jointBayesNet(2, 6); SymbolicBayesNet expected; - expected.push_back(boost::make_shared(2, 6)); - expected.push_back(boost::make_shared(6)); + expected.emplace_shared(2, 6); + expected.emplace_shared(6); EXPECT(assert_equal(expected, *actualJoint)); } // Check joint P(4,6) - if (false) { // TODO, not disjoint + if (false) { // TODO, not disjoint actualJoint = bayesTree.jointBayesNet(4, 6); SymbolicBayesNet expected; - expected.push_back(boost::make_shared(6)); - expected.push_back(boost::make_shared(4, 6)); + expected.emplace_shared(6); + expected.emplace_shared(4, 6); EXPECT(assert_equal(expected, *actualJoint)); } } /* 
************************************************************************* */ -TEST(SymbolicBayesTree, forest_joint) -{ +TEST(SymbolicBayesTree, forest_joint) { // Create forest - SymbolicBayesTreeClique::shared_ptr root1 = MakeClique(list_of(1), 1); - SymbolicBayesTreeClique::shared_ptr root2 = MakeClique(list_of(2), 1); + sharedClique root1 = LeafClique(Keys(1), 1); + sharedClique root2 = LeafClique(Keys(2), 1); SymbolicBayesTree bayesTree; bayesTree.insertRoot(root1); bayesTree.insertRoot(root2); // Check joint - SymbolicBayesNet expected = list_of - (SymbolicConditional(1)) - (SymbolicConditional(2)); + auto expected = + SymbolicBayesNet(SymbolicConditional(1))(SymbolicConditional(2)); SymbolicBayesNet actual = *bayesTree.jointBayesNet(1, 2); EXPECT(assert_equal(expected, actual)); @@ -550,7 +494,7 @@ TEST(SymbolicBayesTree, forest_joint) C6 0 : 1 **************************************************************************** */ -TEST( SymbolicBayesTree, linear_smoother_shortcuts ) { +TEST(SymbolicBayesTree, linear_smoother_shortcuts) { // Create smoother with 7 nodes SymbolicFactorGraph smoother; smoother.push_factor(0); @@ -581,7 +525,8 @@ TEST( SymbolicBayesTree, linear_smoother_shortcuts ) { { // check shortcut P(S2||R) to root - SymbolicBayesTree::Clique::shared_ptr c = bayesTree[4]; // 4 is frontal in C2 + SymbolicBayesTree::Clique::shared_ptr c = + bayesTree[4]; // 4 is frontal in C2 SymbolicBayesNet shortcut = c->shortcut(R); SymbolicBayesNet expected; EXPECT(assert_equal(expected, shortcut)); @@ -589,45 +534,46 @@ TEST( SymbolicBayesTree, linear_smoother_shortcuts ) { { // check shortcut P(S3||R) to root - SymbolicBayesTree::Clique::shared_ptr c = bayesTree[3]; // 3 is frontal in C3 + SymbolicBayesTree::Clique::shared_ptr c = + bayesTree[3]; // 3 is frontal in C3 SymbolicBayesNet shortcut = c->shortcut(R); - SymbolicBayesNet expected = list_of(SymbolicConditional(4, 5)); + auto expected = SymbolicBayesNet(SymbolicConditional(4, 5)); 
EXPECT(assert_equal(expected, shortcut)); } { // check shortcut P(S4||R) to root - SymbolicBayesTree::Clique::shared_ptr c = bayesTree[2]; // 2 is frontal in C4 + SymbolicBayesTree::Clique::shared_ptr c = + bayesTree[2]; // 2 is frontal in C4 SymbolicBayesNet shortcut = c->shortcut(R); - SymbolicBayesNet expected = list_of(SymbolicConditional(3, 5)); + auto expected = SymbolicBayesNet(SymbolicConditional(3, 5)); EXPECT(assert_equal(expected, shortcut)); } } /* ************************************************************************* */ // from testSymbolicJunctionTree, which failed at one point -TEST(SymbolicBayesTree, complicatedMarginal) -{ +TEST(SymbolicBayesTree, complicatedMarginal) { // Create the conditionals to go in the BayesTree - SymbolicBayesTreeClique::shared_ptr cur; - SymbolicBayesTreeClique::shared_ptr root = MakeClique(list_of(11)(12), 2); + sharedClique cur; + sharedClique root = LeafClique(Keys(11)(12), 2); cur = root; - root->children += MakeClique(list_of(9)(10)(11)(12), 2); + root->children.push_back(LeafClique(Keys(9)(10)(11)(12), 2)); root->children.back()->parent_ = root; - root->children += MakeClique(list_of(7)(8)(11), 2); + root->children.push_back(LeafClique(Keys(7)(8)(11), 2)); root->children.back()->parent_ = root; cur = root->children.back(); - cur->children += MakeClique(list_of(5)(6)(7)(8), 2); + cur->children.push_back(LeafClique(Keys(5)(6)(7)(8), 2)); cur->children.back()->parent_ = cur; cur = cur->children.back(); - cur->children += MakeClique(list_of(3)(4)(6), 2); + cur->children.push_back(LeafClique(Keys(3)(4)(6), 2)); cur->children.back()->parent_ = cur; - cur->children += MakeClique(list_of(1)(2)(5), 2); + cur->children.push_back(LeafClique(Keys(1)(2)(5), 2)); cur->children.back()->parent_ = cur; // Create Bayes Tree @@ -656,9 +602,8 @@ TEST(SymbolicBayesTree, complicatedMarginal) { SymbolicBayesTree::Clique::shared_ptr c = bt[5]; SymbolicBayesNet shortcut = c->shortcut(root); - SymbolicBayesNet expected = list_of - 
(SymbolicConditional(7, 8, 11)) - (SymbolicConditional(8, 11)); + auto expected = SymbolicBayesNet(SymbolicConditional(7, 8, 11))( + SymbolicConditional(8, 11)); EXPECT(assert_equal(expected, shortcut)); } @@ -666,7 +611,7 @@ TEST(SymbolicBayesTree, complicatedMarginal) { SymbolicBayesTree::Clique::shared_ptr c = bt[3]; SymbolicBayesNet shortcut = c->shortcut(root); - SymbolicBayesNet expected = list_of(SymbolicConditional(6, 11)); + auto expected = SymbolicBayesNet(SymbolicConditional(6, 11)); EXPECT(assert_equal(expected, shortcut)); } @@ -674,7 +619,7 @@ TEST(SymbolicBayesTree, complicatedMarginal) { SymbolicBayesTree::Clique::shared_ptr c = bt[1]; SymbolicBayesNet shortcut = c->shortcut(root); - SymbolicBayesNet expected = list_of(SymbolicConditional(5, 11)); + auto expected = SymbolicBayesNet(SymbolicConditional(5, 11)); EXPECT(assert_equal(expected, shortcut)); } @@ -689,12 +634,10 @@ TEST(SymbolicBayesTree, complicatedMarginal) SymbolicFactor::shared_ptr actual = bt.marginalFactor(6); EXPECT(assert_equal(SymbolicFactor(6), *actual, 1e-1)); } - } /* ************************************************************************* */ TEST(SymbolicBayesTree, COLAMDvsMETIS) { - // create circular graph SymbolicFactorGraph sfg; sfg.push_factor(0, 1); @@ -707,20 +650,23 @@ TEST(SymbolicBayesTree, COLAMDvsMETIS) { // COLAMD { Ordering ordering = Ordering::Create(Ordering::COLAMD, sfg); - EXPECT(assert_equal(Ordering(list_of(0)(5)(1)(4)(2)(3)), ordering)); + EXPECT(assert_equal(Ordering{0, 5, 1, 4, 2, 3}, ordering)); // - P( 4 2 3) // | - P( 1 | 2 4) // | | - P( 5 | 1 4) // | | | - P( 0 | 1 5) SymbolicBayesTree expected; - expected.insertRoot( - MakeClique(list_of(4)(2)(3), 3, - list_of( - MakeClique(list_of(1)(2)(4), 1, - list_of( - MakeClique(list_of(5)(1)(4), 1, - list_of(MakeClique(list_of(0)(1)(5), 1)))))))); + expected.insertRoot( // + NodeClique( + Keys(4)(2)(3), 3, + Children( // + NodeClique( + Keys(1)(2)(4), 1, + Children( // + NodeClique(Keys(5)(1)(4), 1, + 
Children( // + LeafClique(Keys(0)(1)(5), 1)))))))); SymbolicBayesTree actual = *sfg.eliminateMultifrontal(ordering); EXPECT(assert_equal(expected, actual)); @@ -730,13 +676,13 @@ TEST(SymbolicBayesTree, COLAMDvsMETIS) { // METIS { Ordering ordering = Ordering::Create(Ordering::METIS, sfg); -// Linux and Mac split differently when using mettis +// Linux and Mac split differently when using Metis #if defined(__APPLE__) - EXPECT(assert_equal(Ordering(list_of(5)(4)(2)(1)(0)(3)), ordering)); + EXPECT(assert_equal(Ordering{5, 4, 2, 1, 0, 3}, ordering)); #elif defined(_WIN32) - EXPECT(assert_equal(Ordering(list_of(4)(3)(1)(0)(5)(2)), ordering)); + EXPECT(assert_equal(Ordering{4, 3, 1, 0, 5, 2}, ordering)); #else - EXPECT(assert_equal(Ordering(list_of(3)(2)(5)(0)(4)(1)), ordering)); + EXPECT(assert_equal(Ordering{3, 2, 5, 0, 4, 1}, ordering)); #endif // - P( 1 0 3) @@ -746,25 +692,25 @@ TEST(SymbolicBayesTree, COLAMDvsMETIS) { SymbolicBayesTree expected; #if defined(__APPLE__) expected.insertRoot( - MakeClique(list_of(1)(0)(3), 3, - list_of( - MakeClique(list_of(4)(0)(3), 1, - list_of(MakeClique(list_of(5)(0)(4), 1))))( - MakeClique(list_of(2)(1)(3), 1)))); + NodeClique(Keys(1)(0)(3), 3, + Children( // + NodeClique(Keys(4)(0)(3), 1, // + {LeafClique(Keys(5)(0)(4), 1)}))( + LeafClique(Keys(2)(1)(3), 1)))); #elif defined(_WIN32) expected.insertRoot( - MakeClique(list_of(3)(5)(2), 3, - list_of( - MakeClique(list_of(4)(3)(5), 1, - list_of(MakeClique(list_of(0)(2)(5), 1))))( - MakeClique(list_of(1)(0)(2), 1)))); + NodeClique(Keys(3)(5)(2), 3, + Children( // + NodeClique(Keys(4)(3)(5), 1, // + {LeafClique(Keys(0)(2)(5), 1)}))( + LeafClique(Keys(1)(0)(2), 1)))); #else expected.insertRoot( - MakeClique(list_of(2)(4)(1), 3, - list_of( - MakeClique(list_of(0)(1)(4), 1, - list_of(MakeClique(list_of(5)(0)(4), 1))))( - MakeClique(list_of(3)(2)(4), 1)))); + NodeClique(Keys(2)(4)(1), 3, + Children( // + NodeClique(Keys(0)(1)(4), 1, // + {LeafClique(Keys(5)(0)(4), 1)}))( + 
LeafClique(Keys(3)(2)(4), 1)))); #endif SymbolicBayesTree actual = *sfg.eliminateMultifrontal(ordering); EXPECT(assert_equal(expected, actual)); @@ -778,4 +724,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - diff --git a/gtsam/symbolic/tests/testSymbolicConditional.cpp b/gtsam/symbolic/tests/testSymbolicConditional.cpp index 963d0dfef8..d8c13d0922 100644 --- a/gtsam/symbolic/tests/testSymbolicConditional.cpp +++ b/gtsam/symbolic/tests/testSymbolicConditional.cpp @@ -15,8 +15,6 @@ * @author Frank Dellaert */ -#include -using namespace boost::assign; #include #include @@ -69,8 +67,7 @@ TEST( SymbolicConditional, threeParents ) /* ************************************************************************* */ TEST( SymbolicConditional, fourParents ) { - SymbolicConditional c0 = SymbolicConditional::FromKeys( - list_of(0)(1)(2)(3)(4), 1); + auto c0 = SymbolicConditional::FromKeys(KeyVector{0, 1, 2, 3, 4}, 1); LONGS_EQUAL(1, (long)c0.nrFrontals()); LONGS_EQUAL(4, (long)c0.nrParents()); } @@ -78,9 +75,8 @@ TEST( SymbolicConditional, fourParents ) /* ************************************************************************* */ TEST( SymbolicConditional, FromRange ) { - SymbolicConditional::shared_ptr c0 = - boost::make_shared( - SymbolicConditional::FromKeys(list_of(1)(2)(3)(4)(5), 2)); + auto c0 = boost::make_shared( + SymbolicConditional::FromKeys(KeyVector{1, 2, 3, 4, 5}, 2)); LONGS_EQUAL(2, (long)c0->nrFrontals()); LONGS_EQUAL(3, (long)c0->nrParents()); } diff --git a/gtsam/symbolic/tests/testSymbolicEliminationTree.cpp b/gtsam/symbolic/tests/testSymbolicEliminationTree.cpp index 78bb2182c7..cb370f5539 100644 --- a/gtsam/symbolic/tests/testSymbolicEliminationTree.cpp +++ b/gtsam/symbolic/tests/testSymbolicEliminationTree.cpp @@ -17,47 +17,48 @@ */ #include - -#include -#include -#include - #include -#include #include +#include + +#include +#include #include 
"symbolicExampleGraphs.h" using namespace gtsam; using namespace gtsam::symbol_shorthand; using namespace std; -using boost::assign::list_of; +using sharedNode = SymbolicEliminationTree::sharedNode; + +// Use list_of replacement defined in symbolicExampleGraphs.h +using ChildNodes = ChainedVector; class EliminationTreeTester { -public: + public: // build hardcoded tree - static SymbolicEliminationTree buildHardcodedTree(const SymbolicFactorGraph& fg) { - - SymbolicEliminationTree::sharedNode leaf0(new SymbolicEliminationTree::Node); + static SymbolicEliminationTree buildHardcodedTree( + const SymbolicFactorGraph& fg) { + sharedNode leaf0(new SymbolicEliminationTree::Node); leaf0->key = 0; leaf0->factors.push_back(fg[0]); leaf0->factors.push_back(fg[1]); - SymbolicEliminationTree::sharedNode node1(new SymbolicEliminationTree::Node); + sharedNode node1(new SymbolicEliminationTree::Node); node1->key = 1; node1->factors.push_back(fg[2]); node1->children.push_back(leaf0); - SymbolicEliminationTree::sharedNode node2(new SymbolicEliminationTree::Node); + sharedNode node2(new SymbolicEliminationTree::Node); node2->key = 2; node2->factors.push_back(fg[3]); node2->children.push_back(node1); - SymbolicEliminationTree::sharedNode leaf3(new SymbolicEliminationTree::Node); + sharedNode leaf3(new SymbolicEliminationTree::Node); leaf3->key = 3; leaf3->factors.push_back(fg[4]); - SymbolicEliminationTree::sharedNode root(new SymbolicEliminationTree::Node); + sharedNode root(new SymbolicEliminationTree::Node); root->key = 4; root->children.push_back(leaf3); root->children.push_back(node2); @@ -67,85 +68,100 @@ class EliminationTreeTester { return tree; } - template - static SymbolicEliminationTree MakeTree(const ROOTS& roots) - { + static SymbolicEliminationTree MakeTree(const ChildNodes::Result& roots) { SymbolicEliminationTree et; et.roots_.assign(roots.begin(), roots.end()); return et; } }; -template -static SymbolicEliminationTree::sharedNode MakeNode(Key key, const FACTORS& 
factors) -{ - SymbolicEliminationTree::sharedNode node = boost::make_shared(); +// Create a leaf node. +static sharedNode Leaf(Key key, const SymbolicFactorGraph& factors) { + sharedNode node(new SymbolicEliminationTree::Node()); node->key = key; - SymbolicFactorGraph factorsAsGraph = factors; - node->factors.assign(factorsAsGraph.begin(), factorsAsGraph.end()); + node->factors.assign(factors.begin(), factors.end()); return node; } -template -static SymbolicEliminationTree::sharedNode MakeNode(Key key, const FACTORS& factors, const CHILDREN& children) -{ - SymbolicEliminationTree::sharedNode node = boost::make_shared(); - node->key = key; - SymbolicFactorGraph factorsAsGraph = factors; - node->factors.assign(factorsAsGraph.begin(), factorsAsGraph.end()); +// Create a node with children. +static sharedNode Node(Key key, const SymbolicFactorGraph& factors, + const ChildNodes::Result& children) { + sharedNode node = Leaf(key, factors); node->children.assign(children.begin(), children.end()); return node; } - /* ************************************************************************* */ -TEST(EliminationTree, Create) -{ +TEST(EliminationTree, Create) { SymbolicEliminationTree expected = - EliminationTreeTester::buildHardcodedTree(simpleTestGraph1); + EliminationTreeTester::buildHardcodedTree(simpleTestGraph1); // Build from factor graph - Ordering order; - order += 0,1,2,3,4; + const Ordering order{0, 1, 2, 3, 4}; SymbolicEliminationTree actual(simpleTestGraph1, order); CHECK(assert_equal(expected, actual)); } /* ************************************************************************* */ -TEST(EliminationTree, Create2) -{ +TEST(EliminationTree, Create2) { // l1 l2 // / | / | // x1 --- x2 --- x3 --- x4 --- x5 // \ | // l3 SymbolicFactorGraph graph; - graph += SymbolicFactor(X(1), L(1)); - graph += SymbolicFactor(X(1), X(2)); - graph += SymbolicFactor(X(2), L(1)); - graph += SymbolicFactor(X(2), X(3)); - graph += SymbolicFactor(X(3), X(4)); - graph += 
SymbolicFactor(X(4), L(2)); - graph += SymbolicFactor(X(4), X(5)); - graph += SymbolicFactor(L(2), X(5)); - graph += SymbolicFactor(X(4), L(3)); - graph += SymbolicFactor(X(5), L(3)); - - SymbolicEliminationTree expected = EliminationTreeTester::MakeTree(list_of - (MakeNode(X(3), SymbolicFactorGraph(), list_of - (MakeNode(X(2), list_of(SymbolicFactor(X(2), X(3))), list_of - (MakeNode(L(1), list_of(SymbolicFactor(X(2), L(1))), list_of - (MakeNode(X(1), list_of(SymbolicFactor(X(1), L(1))) (SymbolicFactor(X(1), X(2))))))))) - (MakeNode(X(4), list_of(SymbolicFactor(X(3), X(4))), list_of - (MakeNode(L(2), list_of(SymbolicFactor(X(4), L(2))), list_of - (MakeNode(X(5), list_of(SymbolicFactor(X(4), X(5))) (SymbolicFactor(L(2), X(5))), list_of - (MakeNode(L(3), list_of(SymbolicFactor(X(4), L(3))) (SymbolicFactor(X(5), L(3)))))))))))))); - - Ordering order = list_of(X(1)) (L(3)) (L(1)) (X(5)) (X(2)) (L(2)) (X(4)) (X(3)); - + auto binary = [](Key j1, Key j2) -> SymbolicFactor { + return SymbolicFactor(j1, j2); + }; + graph += binary(X(1), L(1)); + graph += binary(X(1), X(2)); + graph += binary(X(2), L(1)); + graph += binary(X(2), X(3)); + graph += binary(X(3), X(4)); + graph += binary(X(4), L(2)); + graph += binary(X(4), X(5)); + graph += binary(L(2), X(5)); + graph += binary(X(4), L(3)); + graph += binary(X(5), L(3)); + + SymbolicEliminationTree expected = EliminationTreeTester::MakeTree( // + ChildNodes( // + Node(X(3), SymbolicFactorGraph(), + ChildNodes( // + Node(X(2), SymbolicFactorGraph(binary(X(2), X(3))), + ChildNodes( // + Node(L(1), SymbolicFactorGraph(binary(X(2), L(1))), + ChildNodes( // + Leaf(X(1), SymbolicFactorGraph( + binary(X(1), L(1)))( + binary(X(1), X(2)))))))))( + Node(X(4), SymbolicFactorGraph(binary(X(3), X(4))), + ChildNodes( // + Node(L(2), SymbolicFactorGraph(binary(X(4), L(2))), + ChildNodes( // + Node(X(5), + SymbolicFactorGraph(binary( + X(4), X(5)))(binary(L(2), X(5))), + ChildNodes( // + Leaf(L(3), + SymbolicFactorGraph( + binary(X(4), L(3)))( 
+ binary(X(5), L(3))) // + ) // + ) // + ) // + ) // + ) // + ) // + ) // + ) // + ) // + ) // + ); + + const Ordering order{X(1), L(3), L(1), X(5), X(2), L(2), X(4), X(3)}; SymbolicEliminationTree actual(graph, order); - EXPECT(assert_equal(expected, actual)); } diff --git a/gtsam/symbolic/tests/testSymbolicFactor.cpp b/gtsam/symbolic/tests/testSymbolicFactor.cpp index 378e780cde..a8001232a9 100644 --- a/gtsam/symbolic/tests/testSymbolicFactor.cpp +++ b/gtsam/symbolic/tests/testSymbolicFactor.cpp @@ -21,14 +21,11 @@ #include #include -#include -#include #include #include using namespace std; using namespace gtsam; -using namespace boost::assign; /* ************************************************************************* */ #ifdef TRACK_ELIMINATE @@ -70,19 +67,19 @@ TEST(SymbolicFactor, Constructors) /* ************************************************************************* */ TEST(SymbolicFactor, EliminateSymbolic) { - const SymbolicFactorGraph factors = list_of - (SymbolicFactor(2,4,6)) - (SymbolicFactor(1,2,5)) - (SymbolicFactor(0,3)); + const SymbolicFactorGraph factors = { + boost::make_shared(2, 4, 6), + boost::make_shared(1, 2, 5), + boost::make_shared(0, 3)}; const SymbolicFactor expectedFactor(4,5,6); const SymbolicConditional expectedConditional = - SymbolicConditional::FromKeys(list_of(0)(1)(2)(3)(4)(5)(6), 4); + SymbolicConditional::FromKeys(KeyVector{0,1,2,3,4,5,6}, 4); SymbolicFactor::shared_ptr actualFactor; SymbolicConditional::shared_ptr actualConditional; boost::tie(actualConditional, actualFactor) = - EliminateSymbolic(factors, list_of(0)(1)(2)(3)); + EliminateSymbolic(factors, Ordering{0, 1, 2, 3}); CHECK(assert_equal(expectedConditional, *actualConditional)); CHECK(assert_equal(expectedFactor, *actualFactor)); diff --git a/gtsam/symbolic/tests/testSymbolicFactorGraph.cpp b/gtsam/symbolic/tests/testSymbolicFactorGraph.cpp index 8f4eb3c08d..8d5885d873 100644 --- a/gtsam/symbolic/tests/testSymbolicFactorGraph.cpp +++ 
b/gtsam/symbolic/tests/testSymbolicFactorGraph.cpp @@ -15,34 +15,29 @@ * @author Christian Potthast **/ -#include - +#include #include #include #include #include +#include #include #include -#include - using namespace std; using namespace gtsam; -using namespace boost::assign; /* ************************************************************************* */ TEST(SymbolicFactorGraph, keys1) { - KeySet expected; - expected += 0, 1, 2, 3, 4; + KeySet expected{0, 1, 2, 3, 4}; KeySet actual = simpleTestGraph1.keys(); EXPECT(expected == actual); } /* ************************************************************************* */ TEST(SymbolicFactorGraph, keys2) { - KeySet expected; - expected += 0, 1, 2, 3, 4, 5; + KeySet expected{0, 1, 2, 3, 4, 5}; KeySet actual = simpleTestGraph2.keys(); EXPECT(expected == actual); } @@ -50,8 +45,7 @@ TEST(SymbolicFactorGraph, keys2) { /* ************************************************************************* */ TEST(SymbolicFactorGraph, eliminateFullSequential) { // Test with simpleTestGraph1 - Ordering order; - order += 0, 1, 2, 3, 4; + Ordering order{0, 1, 2, 3, 4}; SymbolicBayesNet actual1 = *simpleTestGraph1.eliminateSequential(order); EXPECT(assert_equal(simpleTestGraph1BayesNet, actual1)); @@ -63,18 +57,18 @@ TEST(SymbolicFactorGraph, eliminateFullSequential) { /* ************************************************************************* */ TEST(SymbolicFactorGraph, eliminatePartialSequential) { // Eliminate 0 and 1 - const Ordering order = list_of(0)(1); + const Ordering order{0, 1}; - const SymbolicBayesNet expectedBayesNet = - list_of(SymbolicConditional(0, 1, 2))(SymbolicConditional(1, 2, 3, 4)); + const auto expectedBayesNet = SymbolicBayesNet(SymbolicConditional(0, 1, 2))( + SymbolicConditional(1, 2, 3, 4)); - const SymbolicFactorGraph expectedSfg = list_of(SymbolicFactor(2, 3))( + const auto expectedSfg = SymbolicFactorGraph(SymbolicFactor(2, 3))( SymbolicFactor(4, 5))(SymbolicFactor(2, 3, 4)); SymbolicBayesNet::shared_ptr 
actualBayesNet; SymbolicFactorGraph::shared_ptr actualSfg; boost::tie(actualBayesNet, actualSfg) = - simpleTestGraph2.eliminatePartialSequential(Ordering(list_of(0)(1))); + simpleTestGraph2.eliminatePartialSequential(Ordering{0, 1}); EXPECT(assert_equal(expectedSfg, *actualSfg)); EXPECT(assert_equal(expectedBayesNet, *actualBayesNet)); @@ -82,8 +76,7 @@ TEST(SymbolicFactorGraph, eliminatePartialSequential) { SymbolicBayesNet::shared_ptr actualBayesNet2; SymbolicFactorGraph::shared_ptr actualSfg2; boost::tie(actualBayesNet2, actualSfg2) = - simpleTestGraph2.eliminatePartialSequential( - list_of(0)(1).convert_to_container()); + simpleTestGraph2.eliminatePartialSequential(Ordering{0, 1}); EXPECT(assert_equal(expectedSfg, *actualSfg2)); EXPECT(assert_equal(expectedBayesNet, *actualBayesNet2)); @@ -105,18 +98,18 @@ TEST(SymbolicFactorGraph, eliminatePartialMultifrontal) { SymbolicBayesTree expectedBayesTree; SymbolicConditional::shared_ptr root = boost::make_shared( - SymbolicConditional::FromKeys(list_of(4)(5)(1), 2)); + SymbolicConditional::FromKeys(KeyVector{4, 5, 1}, 2)); expectedBayesTree.insertRoot( boost::make_shared(root)); - SymbolicFactorGraph expectedFactorGraph = - list_of(SymbolicFactor(0, 1))(SymbolicFactor(0, 2))(SymbolicFactor(1, 3))( - SymbolicFactor(2, 3))(SymbolicFactor(1)); + const auto expectedFactorGraph = + SymbolicFactorGraph(SymbolicFactor(0, 1))(SymbolicFactor(0, 2))( + SymbolicFactor(1, 3))(SymbolicFactor(2, 3))(SymbolicFactor(1)); SymbolicBayesTree::shared_ptr actualBayesTree; SymbolicFactorGraph::shared_ptr actualFactorGraph; boost::tie(actualBayesTree, actualFactorGraph) = - simpleTestGraph2.eliminatePartialMultifrontal(Ordering(list_of(4)(5))); + simpleTestGraph2.eliminatePartialMultifrontal(Ordering{4, 5}); EXPECT(assert_equal(expectedFactorGraph, *actualFactorGraph)); EXPECT(assert_equal(expectedBayesTree, *actualBayesTree)); @@ -132,8 +125,7 @@ TEST(SymbolicFactorGraph, eliminatePartialMultifrontal) { SymbolicBayesTree::shared_ptr 
actualBayesTree2; SymbolicFactorGraph::shared_ptr actualFactorGraph2; boost::tie(actualBayesTree2, actualFactorGraph2) = - simpleTestGraph2.eliminatePartialMultifrontal( - list_of(4)(5).convert_to_container()); + simpleTestGraph2.eliminatePartialMultifrontal(KeyVector{4, 5}); EXPECT(assert_equal(expectedFactorGraph, *actualFactorGraph2)); EXPECT(assert_equal(expectedBayesTree2, *actualBayesTree2)); @@ -141,12 +133,12 @@ TEST(SymbolicFactorGraph, eliminatePartialMultifrontal) { /* ************************************************************************* */ TEST(SymbolicFactorGraph, marginalMultifrontalBayesNet) { - SymbolicBayesNet expectedBayesNet = - list_of(SymbolicConditional(0, 1, 2))(SymbolicConditional(1, 2, 3))( - SymbolicConditional(2, 3))(SymbolicConditional(3)); + auto expectedBayesNet = + SymbolicBayesNet(SymbolicConditional(0, 1, 2))(SymbolicConditional( + 1, 2, 3))(SymbolicConditional(2, 3))(SymbolicConditional(3)); - SymbolicBayesNet actual1 = *simpleTestGraph2.marginalMultifrontalBayesNet( - Ordering(list_of(0)(1)(2)(3))); + SymbolicBayesNet actual1 = + *simpleTestGraph2.marginalMultifrontalBayesNet(Ordering{0, 1, 2, 3}); EXPECT(assert_equal(expectedBayesNet, actual1)); } @@ -184,7 +176,7 @@ TEST(SymbolicFactorGraph, marginals) { fg.push_factor(3, 4); // eliminate - Ordering ord(list_of(3)(4)(2)(1)(0)); + Ordering ord{3, 4, 2, 1, 0}; auto actual = fg.eliminateSequential(ord); SymbolicBayesNet expected; expected.emplace_shared(3, 4); @@ -196,7 +188,7 @@ TEST(SymbolicFactorGraph, marginals) { { // jointBayesNet - Ordering ord(list_of(0)(4)(3)); + Ordering ord{0, 4, 3}; auto actual = fg.eliminatePartialSequential(ord); SymbolicBayesNet expectedBN; expectedBN.emplace_shared(0, 1, 2); @@ -207,7 +199,7 @@ TEST(SymbolicFactorGraph, marginals) { { // jointBayesNet - Ordering ord(list_of(0)(2)(3)); + Ordering ord{0, 2, 3}; auto actual = fg.eliminatePartialSequential(ord); SymbolicBayesNet expectedBN; expectedBN.emplace_shared(0, 1, 2); @@ -218,7 +210,7 @@ 
TEST(SymbolicFactorGraph, marginals) { { // conditionalBayesNet - Ordering ord(list_of(0)(2)); + Ordering ord{0, 2}; auto actual = fg.eliminatePartialSequential(ord); SymbolicBayesNet expectedBN; expectedBN.emplace_shared(0, 1, 2); @@ -306,7 +298,7 @@ TEST(SymbolicFactorGraph, add_factors) { expected.push_factor(1); expected.push_factor(11); expected.push_factor(2); - const FactorIndices expectedIndices = list_of(1)(3); + const FactorIndices expectedIndices{1, 3}; const FactorIndices actualIndices = fg1.add_factors(fg2, true); EXPECT(assert_equal(expected, fg1)); @@ -314,7 +306,7 @@ TEST(SymbolicFactorGraph, add_factors) { expected.push_factor(1); expected.push_factor(2); - const FactorIndices expectedIndices2 = list_of(4)(5); + const FactorIndices expectedIndices2{4, 5}; const FactorIndices actualIndices2 = fg1.add_factors(fg2, false); EXPECT(assert_equal(expected, fg1)); diff --git a/gtsam/symbolic/tests/testSymbolicISAM.cpp b/gtsam/symbolic/tests/testSymbolicISAM.cpp index e3ab36c943..e84af28a3b 100644 --- a/gtsam/symbolic/tests/testSymbolicISAM.cpp +++ b/gtsam/symbolic/tests/testSymbolicISAM.cpp @@ -20,9 +20,6 @@ #include -#include // for operator += -using namespace boost::assign; - using namespace std; using namespace gtsam; @@ -86,7 +83,7 @@ TEST( SymbolicISAM, iSAM ) fullGraph += SymbolicFactor(_B_, _S_); // This ordering is chosen to match the one chosen by COLAMD during the ISAM update - Ordering ordering(list_of(_X_)(_B_)(_S_)(_E_)(_L_)(_T_)); + Ordering ordering {_X_, _B_, _S_, _E_, _L_, _T_}; SymbolicBayesTree expected = *fullGraph.eliminateMultifrontal(ordering); // Add factor on B and S diff --git a/gtsam/symbolic/tests/testSymbolicJunctionTree.cpp b/gtsam/symbolic/tests/testSymbolicJunctionTree.cpp index c5b1f4ff1f..796bdc49e3 100644 --- a/gtsam/symbolic/tests/testSymbolicJunctionTree.cpp +++ b/gtsam/symbolic/tests/testSymbolicJunctionTree.cpp @@ -23,9 +23,6 @@ #include #include -#include -using namespace boost::assign; - #include 
"symbolicExampleGraphs.h" using namespace gtsam; @@ -43,9 +40,9 @@ TEST( JunctionTree, constructor ) SymbolicJunctionTree actual(SymbolicEliminationTree(simpleChain, order)); SymbolicJunctionTree::Node::Keys - frontal1 = list_of(2)(3), - frontal2 = list_of(0)(1), - sep1, sep2 = list_of(2); + frontal1 {2, 3}, + frontal2 {0, 1}, + sep1, sep2 {2}; EXPECT(assert_container_equality(frontal1, actual.roots().front()->orderedFrontalKeys)); //EXPECT(assert_equal(sep1, actual.roots().front()->separator)); LONGS_EQUAL(1, (long)actual.roots().front()->factors.size()); diff --git a/gtsam/symbolic/tests/testVariableIndex.cpp b/gtsam/symbolic/tests/testVariableIndex.cpp index 6afb47e26a..8fdb7bee18 100644 --- a/gtsam/symbolic/tests/testVariableIndex.cpp +++ b/gtsam/symbolic/tests/testVariableIndex.cpp @@ -22,10 +22,6 @@ #include -#include -#include -using namespace boost::assign; - using namespace std; using namespace gtsam; @@ -79,7 +75,7 @@ TEST(VariableIndex, augment2) { VariableIndex expected(fgCombined); - FactorIndices newIndices = list_of(5)(6)(7)(8); + FactorIndices newIndices {5, 6, 7, 8}; VariableIndex actual(fg1); actual.augment(fg2, newIndices); @@ -108,7 +104,7 @@ TEST(VariableIndex, remove) { vector indices; indices.push_back(0); indices.push_back(1); indices.push_back(2); indices.push_back(3); actual.remove(indices.begin(), indices.end(), fg1); - std::list unusedVariables; unusedVariables += 0, 9; + std::list unusedVariables{0, 9}; actual.removeUnusedVariables(unusedVariables.begin(), unusedVariables.end()); CHECK(assert_equal(expected, actual)); @@ -135,7 +131,7 @@ TEST(VariableIndex, deep_copy) { vector indices; indices.push_back(0); indices.push_back(1); indices.push_back(2); indices.push_back(3); clone.remove(indices.begin(), indices.end(), fg1); - std::list unusedVariables; unusedVariables += 0, 9; + std::list unusedVariables{0, 9}; clone.removeUnusedVariables(unusedVariables.begin(), unusedVariables.end()); // When modifying the clone, the original should 
have stayed the same diff --git a/gtsam_unstable/base/tests/testBTree.cpp b/gtsam_unstable/base/tests/testBTree.cpp index fcdbd0393a..b74a5e5ea2 100644 --- a/gtsam_unstable/base/tests/testBTree.cpp +++ b/gtsam_unstable/base/tests/testBTree.cpp @@ -17,12 +17,12 @@ */ #include -#include // for += -using namespace boost::assign; #include #include +#include + using namespace std; using namespace gtsam; @@ -147,13 +147,12 @@ TEST( BTree, iterating ) // acid iterator test int sum = 0; - for(const KeyInt& p: tree) -sum += p.second; + for (const KeyInt& p : tree) sum += p.second; LONGS_EQUAL(15,sum) // STL iterator test - list expected, actual; - expected += p1,p2,p3,p4,p5; + auto expected = std::list {p1, p2, p3, p4, p5}; + std::list actual; copy (tree.begin(),tree.end(),back_inserter(actual)); CHECK(actual==expected) } @@ -181,7 +180,7 @@ TEST( BTree, stress ) tree = tree.add(key, value); LONGS_EQUAL(i,tree.size()) CHECK(tree.find(key) == value) - expected += make_pair(key, value); + expected.emplace_back(key, value); } // Check height is log(N) diff --git a/gtsam_unstable/base/tests/testDSF.cpp b/gtsam_unstable/base/tests/testDSF.cpp index c3a59aada0..019963e59d 100644 --- a/gtsam_unstable/base/tests/testDSF.cpp +++ b/gtsam_unstable/base/tests/testDSF.cpp @@ -20,10 +20,6 @@ #include -#include -#include -using namespace boost::assign; - #include using namespace std; @@ -88,7 +84,7 @@ TEST(DSF, makePair) { /* ************************************************************************* */ TEST(DSF, makeList) { DSFInt dsf; - list keys; keys += 5, 6, 7; + list keys{5, 6, 7}; dsf = dsf.makeList(keys); EXPECT(dsf.findSet(5) == dsf.findSet(7)); } @@ -112,7 +108,7 @@ TEST(DSF, sets) { map > sets = dsf.sets(); LONGS_EQUAL(1, sets.size()); - set expected; expected += 5, 6; + set expected{5, 6}; EXPECT(expected == sets[dsf.findSet(5)]); } @@ -127,7 +123,7 @@ TEST(DSF, sets2) { map > sets = dsf.sets(); LONGS_EQUAL(1, sets.size()); - set expected; expected += 5, 6, 7; + set expected{5, 
6, 7}; EXPECT(expected == sets[dsf.findSet(5)]); } @@ -141,7 +137,7 @@ TEST(DSF, sets3) { map > sets = dsf.sets(); LONGS_EQUAL(2, sets.size()); - set expected; expected += 5, 6; + set expected{5, 6}; EXPECT(expected == sets[dsf.findSet(5)]); } @@ -152,11 +148,11 @@ TEST(DSF, partition) { dsf = dsf.makeSet(6); dsf = dsf.makeUnion(5,6); - list keys; keys += 5; + list keys{5}; map > partitions = dsf.partition(keys); LONGS_EQUAL(1, partitions.size()); - set expected; expected += 5; + set expected{5}; EXPECT(expected == partitions[dsf.findSet(5)]); } @@ -168,11 +164,11 @@ TEST(DSF, partition2) { dsf = dsf.makeSet(7); dsf = dsf.makeUnion(5,6); - list keys; keys += 7; + list keys{7}; map > partitions = dsf.partition(keys); LONGS_EQUAL(1, partitions.size()); - set expected; expected += 7; + set expected{7}; EXPECT(expected == partitions[dsf.findSet(7)]); } @@ -184,11 +180,11 @@ TEST(DSF, partition3) { dsf = dsf.makeSet(7); dsf = dsf.makeUnion(5,6); - list keys; keys += 5, 7; + list keys{5, 7}; map > partitions = dsf.partition(keys); LONGS_EQUAL(2, partitions.size()); - set expected; expected += 5; + set expected{5}; EXPECT(expected == partitions[dsf.findSet(5)]); } @@ -202,7 +198,7 @@ TEST(DSF, set) { set set = dsf.set(5); LONGS_EQUAL(2, set.size()); - std::set expected; expected += 5, 6; + std::set expected{5, 6}; EXPECT(expected == set); } @@ -217,7 +213,7 @@ TEST(DSF, set2) { set set = dsf.set(5); LONGS_EQUAL(3, set.size()); - std::set expected; expected += 5, 6, 7; + std::set expected{5, 6, 7}; EXPECT(expected == set); } @@ -261,7 +257,7 @@ TEST(DSF, flatten) { /* ************************************************************************* */ TEST(DSF, flatten2) { static string x1("x1"), x2("x2"), x3("x3"), x4("x4"); - list keys; keys += x1,x2,x3,x4; + list keys{x1, x2, x3, x4}; DSF dsf(keys); dsf = dsf.makeUnion(x1,x2); dsf = dsf.makeUnion(x3,x4); @@ -285,13 +281,12 @@ TEST(DSF, mergePairwiseMatches) { Measurement m22(2,2),m23(2,3),m25(2,5),m26(2,6); // in image 2 // Add 
them all - list measurements; - measurements += m11,m12,m14, m22,m23,m25,m26; + list measurements{m11, m12, m14, m22, m23, m25, m26}; // Create some "matches" typedef pair Match; - list matches; - matches += Match(m11,m22), Match(m12,m23), Match(m14,m25), Match(m14,m26); + list matches{Match(m11, m22), Match(m12, m23), Match(m14, m25), + Match(m14, m26)}; // Merge matches DSF dsf(measurements); @@ -310,15 +305,15 @@ TEST(DSF, mergePairwiseMatches) { // Check that we have three connected components EXPECT_LONGS_EQUAL(3, dsf.numSets()); - set expected1; expected1 += m11,m22; + set expected1{m11,m22}; set actual1 = dsf.set(m11); EXPECT(expected1 == actual1); - set expected2; expected2 += m12,m23; + set expected2{m12,m23}; set actual2 = dsf.set(m12); EXPECT(expected2 == actual2); - set expected3; expected3 += m14,m25,m26; + set expected3{m14,m25,m26}; set actual3 = dsf.set(m14); EXPECT(expected3 == actual3); } diff --git a/gtsam_unstable/discrete/Constraint.h b/gtsam_unstable/discrete/Constraint.h index 168891e6fd..691c659fc4 100644 --- a/gtsam_unstable/discrete/Constraint.h +++ b/gtsam_unstable/discrete/Constraint.h @@ -21,7 +21,6 @@ #include #include -#include #include #include @@ -40,11 +39,10 @@ class GTSAM_UNSTABLE_EXPORT Constraint : public DiscreteFactor { protected: /// Construct unary constraint factor. - Constraint(Key j) : DiscreteFactor(boost::assign::cref_list_of<1>(j)) {} + Constraint(Key j) : DiscreteFactor(KeyVector{j}) {} /// Construct binary constraint factor. - Constraint(Key j1, Key j2) - : DiscreteFactor(boost::assign::cref_list_of<2>(j1)(j2)) {} + Constraint(Key j1, Key j2) : DiscreteFactor(KeyVector{j1, j2}) {} /// Construct n-way constraint factor. 
Constraint(const KeyVector& js) : DiscreteFactor(js) {} diff --git a/gtsam_unstable/discrete/Domain.cpp b/gtsam_unstable/discrete/Domain.cpp index 7acc10cb4a..cf0da42e77 100644 --- a/gtsam_unstable/discrete/Domain.cpp +++ b/gtsam_unstable/discrete/Domain.cpp @@ -37,8 +37,7 @@ double Domain::operator()(const DiscreteValues& values) const { /* ************************************************************************* */ DecisionTreeFactor Domain::toDecisionTreeFactor() const { - DiscreteKeys keys; - keys += DiscreteKey(key(), cardinality_); + const DiscreteKeys keys{DiscreteKey(key(), cardinality_)}; vector table; for (size_t i1 = 0; i1 < cardinality_; ++i1) table.push_back(contains(i1)); DecisionTreeFactor converted(keys, table); diff --git a/gtsam_unstable/discrete/SingleValue.cpp b/gtsam_unstable/discrete/SingleValue.cpp index 6dd81a7dc6..fc5fab83f6 100644 --- a/gtsam_unstable/discrete/SingleValue.cpp +++ b/gtsam_unstable/discrete/SingleValue.cpp @@ -29,8 +29,7 @@ double SingleValue::operator()(const DiscreteValues& values) const { /* ************************************************************************* */ DecisionTreeFactor SingleValue::toDecisionTreeFactor() const { - DiscreteKeys keys; - keys += DiscreteKey(keys_[0], cardinality_); + const DiscreteKeys keys{DiscreteKey(keys_[0], cardinality_)}; vector table; for (size_t i1 = 0; i1 < cardinality_; i1++) table.push_back(i1 == value_); DecisionTreeFactor converted(keys, table); diff --git a/gtsam_unstable/discrete/examples/schedulingExample.cpp b/gtsam_unstable/discrete/examples/schedulingExample.cpp index 487edc97a3..bf5f2f25cf 100644 --- a/gtsam_unstable/discrete/examples/schedulingExample.cpp +++ b/gtsam_unstable/discrete/examples/schedulingExample.cpp @@ -12,14 +12,11 @@ #include #include -#include -#include #include #include #include -using namespace boost::assign; using namespace std; using namespace gtsam; @@ -217,8 +214,7 @@ void sampleSolutions() { vector samplers(7); // Given the time-slots, we can 
create 7 independent samplers - vector slots; - slots += 16, 17, 11, 2, 0, 5, 9; // given slots + vector slots{16, 17, 11, 2, 0, 5, 9}; // given slots for (size_t i = 0; i < 7; i++) samplers[i] = createSampler(i, slots[i], schedulers); @@ -299,8 +295,7 @@ void accomodateStudent() { scheduler.print("scheduler"); // rule out all occupied slots - vector slots; - slots += 16, 17, 11, 2, 0, 5, 9, 14; + vector slots{16, 17, 11, 2, 0, 5, 9, 14}; vector slotsAvailable(scheduler.nrTimeSlots(), 1.0); for(size_t s: slots) slotsAvailable[s] = 0; diff --git a/gtsam_unstable/discrete/examples/schedulingQuals12.cpp b/gtsam_unstable/discrete/examples/schedulingQuals12.cpp index 830d59ba73..a15b6f922d 100644 --- a/gtsam_unstable/discrete/examples/schedulingQuals12.cpp +++ b/gtsam_unstable/discrete/examples/schedulingQuals12.cpp @@ -12,14 +12,11 @@ #include #include -#include -#include #include #include #include -using namespace boost::assign; using namespace std; using namespace gtsam; @@ -226,8 +223,7 @@ void sampleSolutions() { vector samplers(NRSTUDENTS); // Given the time-slots, we can create NRSTUDENTS independent samplers - vector slots; - slots += 3, 20, 2, 6, 5, 11, 1, 4; // given slots + vector slots{3, 20, 2, 6, 5, 11, 1, 4}; // given slots for (size_t i = 0; i < NRSTUDENTS; i++) samplers[i] = createSampler(i, slots[i], schedulers); diff --git a/gtsam_unstable/discrete/examples/schedulingQuals13.cpp b/gtsam_unstable/discrete/examples/schedulingQuals13.cpp index b24f9bf0a4..e888875a45 100644 --- a/gtsam_unstable/discrete/examples/schedulingQuals13.cpp +++ b/gtsam_unstable/discrete/examples/schedulingQuals13.cpp @@ -12,14 +12,11 @@ #include #include -#include -#include #include #include #include -using namespace boost::assign; using namespace std; using namespace gtsam; @@ -251,8 +248,7 @@ void sampleSolutions() { vector samplers(NRSTUDENTS); // Given the time-slots, we can create NRSTUDENTS independent samplers - vector slots; - slots += 12,11,13, 21,16,1, 3,2,6, 7,22,4; 
// given slots + vector slots{12,11,13, 21,16,1, 3,2,6, 7,22,4}; // given slots for (size_t i = 0; i < NRSTUDENTS; i++) samplers[i] = createSampler(i, slots[i], schedulers); diff --git a/gtsam_unstable/discrete/tests/testCSP.cpp b/gtsam_unstable/discrete/tests/testCSP.cpp index fb386b2553..5756cb99ce 100644 --- a/gtsam_unstable/discrete/tests/testCSP.cpp +++ b/gtsam_unstable/discrete/tests/testCSP.cpp @@ -8,8 +8,6 @@ #include #include -#include -using boost::assign::insert; #include #include @@ -133,8 +131,7 @@ TEST(CSP, allInOne) { // Solve auto mpe = csp.optimize(); - DiscreteValues expected; - insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 1); + DiscreteValues expected {{ID.first, 1}, {UT.first, 0}, {AZ.first, 1}}; EXPECT(assert_equal(expected, mpe)); EXPECT_DOUBLES_EQUAL(1, csp(mpe), 1e-9); } @@ -172,8 +169,8 @@ TEST(CSP, WesternUS) { csp.addAllDiff(WY, CO); csp.addAllDiff(CO, NM); - DiscreteValues mpe; - insert(mpe)(0, 2)(1, 3)(2, 2)(3, 1)(4, 1)(5, 3)(6, 3)(7, 2)(8, 0)(9, 1)(10, 0); + DiscreteValues mpe{{0, 2}, {1, 3}, {2, 2}, {3, 1}, {4, 1}, {5, 3}, + {6, 3}, {7, 2}, {8, 0}, {9, 1}, {10, 0}}; // Create ordering according to example in ND-CSP.lyx Ordering ordering; @@ -224,8 +221,7 @@ TEST(CSP, ArcConsistency) { // Solve auto mpe = csp.optimize(); - DiscreteValues expected; - insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 2); + DiscreteValues expected {{ID.first, 1}, {UT.first, 0}, {AZ.first, 2}}; EXPECT(assert_equal(expected, mpe)); EXPECT_DOUBLES_EQUAL(1, csp(mpe), 1e-9); diff --git a/gtsam_unstable/discrete/tests/testLoopyBelief.cpp b/gtsam_unstable/discrete/tests/testLoopyBelief.cpp index eac0d834e6..0e27b5fbb6 100644 --- a/gtsam_unstable/discrete/tests/testLoopyBelief.cpp +++ b/gtsam_unstable/discrete/tests/testLoopyBelief.cpp @@ -11,14 +11,12 @@ #include #include -#include #include #include #include using namespace std; using namespace boost; -using namespace boost::assign; using namespace gtsam; /** @@ -105,7 +103,7 @@ class LoopyBelief { if 
(debug) subGraph.print("------- Subgraph:"); DiscreteFactor::shared_ptr message; boost::tie(dummyCond, message) = - EliminateDiscrete(subGraph, Ordering(list_of(neighbor))); + EliminateDiscrete(subGraph, Ordering{neighbor}); // store the new factor into messages messages.insert(make_pair(neighbor, message)); if (debug) message->print("------- Message: "); @@ -230,7 +228,7 @@ TEST_UNSAFE(LoopyBelief, construction) { // Map from key to DiscreteKey for building belief factor. // TODO: this is bad! - std::map allKeys = map_list_of(0, C)(1, S)(2, R)(3, W); + std::map allKeys{{0, C}, {1, S}, {2, R}, {3, W}}; // Build graph DecisionTreeFactor pC(C, "0.5 0.5"); diff --git a/gtsam_unstable/discrete/tests/testScheduler.cpp b/gtsam_unstable/discrete/tests/testScheduler.cpp index 086057a466..3b0528360f 100644 --- a/gtsam_unstable/discrete/tests/testScheduler.cpp +++ b/gtsam_unstable/discrete/tests/testScheduler.cpp @@ -10,11 +10,8 @@ #include #include -#include -#include #include -using namespace boost::assign; using namespace std; using namespace gtsam; diff --git a/gtsam_unstable/discrete/tests/testSudoku.cpp b/gtsam_unstable/discrete/tests/testSudoku.cpp index 8b28581699..a476112222 100644 --- a/gtsam_unstable/discrete/tests/testSudoku.cpp +++ b/gtsam_unstable/discrete/tests/testSudoku.cpp @@ -9,8 +9,6 @@ #include #include -#include -using boost::assign::insert; #include #include @@ -60,14 +58,14 @@ class Sudoku : public CSP { // add row constraints for (size_t i = 0; i < n; i++) { DiscreteKeys dkeys; - for (size_t j = 0; j < n; j++) dkeys += dkey(i, j); + for (size_t j = 0; j < n; j++) dkeys.push_back(dkey(i, j)); addAllDiff(dkeys); } // add col constraints for (size_t j = 0; j < n; j++) { DiscreteKeys dkeys; - for (size_t i = 0; i < n; i++) dkeys += dkey(i, j); + for (size_t i = 0; i < n; i++) dkeys.push_back(dkey(i, j)); addAllDiff(dkeys); } @@ -79,7 +77,7 @@ class Sudoku : public CSP { // Box I,J DiscreteKeys dkeys; for (size_t i = i0; i < i0 + N; i++) - for (size_t j = 
j0; j < j0 + N; j++) dkeys += dkey(i, j); + for (size_t j = j0; j < j0 + N; j++) dkeys.push_back(dkey(i, j)); addAllDiff(dkeys); j0 += N; } @@ -128,11 +126,14 @@ TEST(Sudoku, small) { // optimize and check auto solution = csp.optimize(); DiscreteValues expected; - insert(expected)(csp.key(0, 0), 0)(csp.key(0, 1), 1)(csp.key(0, 2), 2)( - csp.key(0, 3), 3)(csp.key(1, 0), 2)(csp.key(1, 1), 3)(csp.key(1, 2), 0)( - csp.key(1, 3), 1)(csp.key(2, 0), 3)(csp.key(2, 1), 2)(csp.key(2, 2), 1)( - csp.key(2, 3), 0)(csp.key(3, 0), 1)(csp.key(3, 1), 0)(csp.key(3, 2), 3)( - csp.key(3, 3), 2); + expected = {{csp.key(0, 0), 0}, {csp.key(0, 1), 1}, + {csp.key(0, 2), 2}, {csp.key(0, 3), 3}, // + {csp.key(1, 0), 2}, {csp.key(1, 1), 3}, + {csp.key(1, 2), 0}, {csp.key(1, 3), 1}, // + {csp.key(2, 0), 3}, {csp.key(2, 1), 2}, + {csp.key(2, 2), 1}, {csp.key(2, 3), 0}, // + {csp.key(3, 0), 1}, {csp.key(3, 1), 0}, + {csp.key(3, 2), 3}, {csp.key(3, 3), 2}}; EXPECT(assert_equal(expected, solution)); // csp.printAssignment(solution); diff --git a/gtsam_unstable/dynamics/FullIMUFactor.h b/gtsam_unstable/dynamics/FullIMUFactor.h index 9f00f81d66..dcca226955 100644 --- a/gtsam_unstable/dynamics/FullIMUFactor.h +++ b/gtsam_unstable/dynamics/FullIMUFactor.h @@ -22,9 +22,9 @@ namespace gtsam { * assumed to be PoseRTV */ template -class FullIMUFactor : public NoiseModelFactor2 { +class FullIMUFactor : public NoiseModelFactorN { public: - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; typedef FullIMUFactor This; protected: diff --git a/gtsam_unstable/dynamics/IMUFactor.h b/gtsam_unstable/dynamics/IMUFactor.h index 9a742b4f0e..4eaa3139da 100644 --- a/gtsam_unstable/dynamics/IMUFactor.h +++ b/gtsam_unstable/dynamics/IMUFactor.h @@ -20,9 +20,9 @@ namespace gtsam { * assumed to be PoseRTV */ template -class IMUFactor : public NoiseModelFactor2 { +class IMUFactor : public NoiseModelFactorN { public: - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; typedef IMUFactor This; 
protected: diff --git a/gtsam_unstable/dynamics/Pendulum.h b/gtsam_unstable/dynamics/Pendulum.h index 1e0ca621cd..d7301a5767 100644 --- a/gtsam_unstable/dynamics/Pendulum.h +++ b/gtsam_unstable/dynamics/Pendulum.h @@ -20,11 +20,11 @@ namespace gtsam { * - For implicit Euler method: q_{k+1} = q_k + h*v_{k+1} * - For sympletic Euler method: q_{k+1} = q_k + h*v_{k+1} */ -class PendulumFactor1: public NoiseModelFactor3 { +class PendulumFactor1: public NoiseModelFactorN { public: protected: - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; /** default constructor to allow for serialization */ PendulumFactor1() {} @@ -66,11 +66,11 @@ class PendulumFactor1: public NoiseModelFactor3 { * - For implicit Euler method: v_{k+1} = v_k - h*g/L*sin(q_{k+1}) * - For sympletic Euler method: v_{k+1} = v_k - h*g/L*sin(q_k) */ -class PendulumFactor2: public NoiseModelFactor3 { +class PendulumFactor2: public NoiseModelFactorN { public: protected: - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; /** default constructor to allow for serialization */ PendulumFactor2() {} @@ -113,11 +113,11 @@ class PendulumFactor2: public NoiseModelFactor3 { * \f$ p_k = -D_1 L_d(q_k,q_{k+1},h) = \frac{1}{h}mr^{2}\left(q_{k+1}-q_{k}\right)+mgrh(1-\alpha)\,\sin\left((1-\alpha)q_{k}+\alpha q_{k+1}\right) \f$ * \f$ = (1/h)mr^2 (q_{k+1}-q_k) + mgrh(1-alpha) sin ((1-alpha)q_k+\alpha q_{k+1}) \f$ */ -class PendulumFactorPk: public NoiseModelFactor3 { +class PendulumFactorPk: public NoiseModelFactorN { public: protected: - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; /** default constructor to allow for serialization */ PendulumFactorPk() {} @@ -169,11 +169,11 @@ class PendulumFactorPk: public NoiseModelFactor3 { * \f$ p_k1 = D_2 L_d(q_k,q_{k+1},h) = \frac{1}{h}mr^{2}\left(q_{k+1}-q_{k}\right)-mgrh\alpha\sin\left((1-\alpha)q_{k}+\alpha q_{k+1}\right) \f$ * \f$ = (1/h)mr^2 (q_{k+1}-q_k) - mgrh alpha sin ((1-alpha)q_k+\alpha q_{k+1}) \f$ */ -class 
PendulumFactorPk1: public NoiseModelFactor3 { +class PendulumFactorPk1: public NoiseModelFactorN { public: protected: - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; /** default constructor to allow for serialization */ PendulumFactorPk1() {} diff --git a/gtsam_unstable/dynamics/SimpleHelicopter.h b/gtsam_unstable/dynamics/SimpleHelicopter.h index 1fb8958337..941b7c7acc 100644 --- a/gtsam_unstable/dynamics/SimpleHelicopter.h +++ b/gtsam_unstable/dynamics/SimpleHelicopter.h @@ -24,10 +24,10 @@ namespace gtsam { * Note: this factor is necessary if one needs to smooth the entire graph. It's not needed * in sequential update method. */ -class Reconstruction : public NoiseModelFactor3 { +class Reconstruction : public NoiseModelFactorN { double h_; // time step - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; public: Reconstruction(Key gKey1, Key gKey, Key xiKey, double h, double mu = 1000.0) : Base(noiseModel::Constrained::All(6, std::abs(mu)), gKey1, gKey, @@ -73,7 +73,7 @@ class Reconstruction : public NoiseModelFactor3 { /** * Implement the Discrete Euler-Poincare' equation: */ -class DiscreteEulerPoincareHelicopter : public NoiseModelFactor3 { +class DiscreteEulerPoincareHelicopter : public NoiseModelFactorN { double h_; /// time step Matrix Inertia_; /// Inertia tensors Inertia = [ J 0; 0 M ] @@ -86,7 +86,7 @@ class DiscreteEulerPoincareHelicopter : public NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; public: DiscreteEulerPoincareHelicopter(Key xiKey1, Key xiKey_1, Key gKey, diff --git a/gtsam_unstable/dynamics/VelocityConstraint.h b/gtsam_unstable/dynamics/VelocityConstraint.h index d24d06e798..0fa3d9cb73 100644 --- a/gtsam_unstable/dynamics/VelocityConstraint.h +++ b/gtsam_unstable/dynamics/VelocityConstraint.h @@ -32,9 +32,9 @@ typedef enum { * NOTE: this approximation is insufficient for large timesteps, but is accurate * if timesteps are small. 
*/ -class VelocityConstraint : public gtsam::NoiseModelFactor2 { +class VelocityConstraint : public gtsam::NoiseModelFactorN { public: - typedef gtsam::NoiseModelFactor2 Base; + typedef gtsam::NoiseModelFactorN Base; protected: diff --git a/gtsam_unstable/dynamics/VelocityConstraint3.h b/gtsam_unstable/dynamics/VelocityConstraint3.h index c9f05cf98f..2880b9c8cd 100644 --- a/gtsam_unstable/dynamics/VelocityConstraint3.h +++ b/gtsam_unstable/dynamics/VelocityConstraint3.h @@ -10,11 +10,11 @@ namespace gtsam { -class VelocityConstraint3 : public NoiseModelFactor3 { +class VelocityConstraint3 : public NoiseModelFactorN { public: protected: - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; /** default constructor to allow for serialization */ VelocityConstraint3() {} @@ -53,6 +53,7 @@ class VelocityConstraint3 : public NoiseModelFactor3 { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor3 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor3", boost::serialization::base_object(*this)); } diff --git a/gtsam_unstable/examples/ConcurrentCalibration.cpp b/gtsam_unstable/examples/ConcurrentCalibration.cpp index 95629fb43b..75dc49eeec 100644 --- a/gtsam_unstable/examples/ConcurrentCalibration.cpp +++ b/gtsam_unstable/examples/ConcurrentCalibration.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -35,12 +36,15 @@ using namespace std; using namespace gtsam; +using symbol_shorthand::K; +using symbol_shorthand::L; +using symbol_shorthand::X; int main(int argc, char** argv){ Values initial_estimate; NonlinearFactorGraph graph; - const noiseModel::Isotropic::shared_ptr model = noiseModel::Isotropic::Sigma(2,1); + const auto model = noiseModel::Isotropic::Sigma(2,1); string calibration_loc = findExampleDataFile("VO_calibration00s.txt"); string pose_loc = 
findExampleDataFile("VO_camera_poses00s.txt"); @@ -54,13 +58,13 @@ int main(int argc, char** argv){ calibration_file >> fx >> fy >> s >> u0 >> v0 >> b; //create stereo camera calibration object - const Cal3_S2::shared_ptr K(new Cal3_S2(fx,fy,s,u0,v0)); - const Cal3_S2::shared_ptr noisy_K(new Cal3_S2(fx*1.2,fy*1.2,s,u0-10,v0+10)); + const Cal3_S2 true_K(fx,fy,s,u0,v0); + const Cal3_S2 noisy_K(fx*1.2,fy*1.2,s,u0-10,v0+10); - initial_estimate.insert(Symbol('K', 0), *noisy_K); + initial_estimate.insert(K(0), noisy_K); - noiseModel::Diagonal::shared_ptr calNoise = noiseModel::Diagonal::Sigmas((Vector(5) << 500, 500, 1e-5, 100, 100).finished()); - graph.addPrior(Symbol('K', 0), *noisy_K, calNoise); + auto calNoise = noiseModel::Diagonal::Sigmas((Vector(5) << 500, 500, 1e-5, 100, 100).finished()); + graph.addPrior(K(0), noisy_K, calNoise); ifstream pose_file(pose_loc.c_str()); @@ -75,7 +79,7 @@ int main(int argc, char** argv){ initial_estimate.insert(Symbol('x', pose_id), Pose3(m)); } - noiseModel::Isotropic::shared_ptr poseNoise = noiseModel::Isotropic::Sigma(6, 0.01); + auto poseNoise = noiseModel::Isotropic::Sigma(6, 0.01); graph.addPrior(Symbol('x', pose_id), Pose3(m), poseNoise); // camera and landmark keys @@ -83,22 +87,22 @@ int main(int argc, char** argv){ // pixel coordinates uL, uR, v (same for left/right images due to rectification) // landmark coordinates X, Y, Z in camera frame, resulting from triangulation - double uL, uR, v, X, Y, Z; + double uL, uR, v, _X, Y, Z; ifstream factor_file(factor_loc.c_str()); cout << "Reading stereo factors" << endl; //read stereo measurement details from file and use to create and add GenericStereoFactor objects to the graph representation - while (factor_file >> x >> l >> uL >> uR >> v >> X >> Y >> Z) { -// graph.emplace_shared >(StereoPoint2(uL, uR, v), model, Symbol('x', x), Symbol('l', l), K); + while (factor_file >> x >> l >> uL >> uR >> v >> _X >> Y >> Z) { +// graph.emplace_shared >(StereoPoint2(uL, uR, v), model, X(x), 
L(l), K); - graph.emplace_shared >(Point2(uL,v), model, Symbol('x', x), Symbol('l', l), Symbol('K', 0)); + graph.emplace_shared >(Point2(uL,v), model, X(x), L(l), K(0)); //if the landmark variable included in this factor has not yet been added to the initial variable value estimate, add it - if (!initial_estimate.exists(Symbol('l', l))) { - Pose3 camPose = initial_estimate.at(Symbol('x', x)); + if (!initial_estimate.exists(L(l))) { + Pose3 camPose = initial_estimate.at(X(x)); //transformFrom() transforms the input Point3 from the camera pose space, camPose, to the global space - Point3 worldPoint = camPose.transformFrom(Point3(X, Y, Z)); - initial_estimate.insert(Symbol('l', l), worldPoint); + Point3 worldPoint = camPose.transformFrom(Point3(_X, Y, Z)); + initial_estimate.insert(L(l), worldPoint); } } @@ -123,7 +127,7 @@ int main(int argc, char** argv){ double currentError; - stream_K << optimizer.iterations() << " " << optimizer.values().at(Symbol('K',0)).vector().transpose() << endl; + stream_K << optimizer.iterations() << " " << optimizer.values().at(K(0)).vector().transpose() << endl; // Iterative loop @@ -132,7 +136,7 @@ int main(int argc, char** argv){ currentError = optimizer.error(); optimizer.iterate(); - stream_K << optimizer.iterations() << " " << optimizer.values().at(Symbol('K',0)).vector().transpose() << endl; + stream_K << optimizer.iterations() << " " << optimizer.values().at(K(0)).vector().transpose() << endl; if(params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << optimizer.error() << endl; } while(optimizer.iterations() < params.maxIterations && @@ -142,13 +146,13 @@ int main(int argc, char** argv){ Values result = optimizer.values(); cout << "Final result sample:" << endl; - Values pose_values = result.filter(); + Values pose_values = utilities::allPose3s(result); pose_values.print("Final camera poses:\n"); - Values(result.filter()).print("Final K\n"); + result.at(K(0)).print("Final K\n"); - noisy_K->print("Initial noisy 
K\n"); - K->print("Initial correct K\n"); + noisy_K.print("Initial noisy K\n"); + true_K.print("Initial correct K\n"); return 0; } diff --git a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp index 1494d784b3..52a45b6d0f 100644 --- a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp +++ b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp @@ -308,12 +308,12 @@ int main(int argc, char** argv) { // And to demonstrate the fixed-lag aspect, print the keys contained in each smoother after 3.0 seconds cout << "After 15.0 seconds, each version contains to the following keys:" << endl; cout << " Concurrent Filter Keys: " << endl; - for(const auto key_value: concurrentFilter.getLinearizationPoint()) { - cout << setprecision(5) << " Key: " << key_value.key << endl; + for(const auto key: concurrentFilter.getLinearizationPoint().keys()) { + cout << setprecision(5) << " Key: " << key << endl; } cout << " Concurrent Smoother Keys: " << endl; - for(const auto key_value: concurrentSmoother.getLinearizationPoint()) { - cout << setprecision(5) << " Key: " << key_value.key << endl; + for(const auto key: concurrentSmoother.getLinearizationPoint().keys()) { + cout << setprecision(5) << " Key: " << key << endl; } cout << " Fixed-Lag Smoother Keys: " << endl; for(const auto& key_timestamp: fixedlagSmoother.timestamps()) { diff --git a/gtsam_unstable/examples/SmartProjectionFactorExample.cpp b/gtsam_unstable/examples/SmartProjectionFactorExample.cpp index e290cef7e5..b3b04cca32 100644 --- a/gtsam_unstable/examples/SmartProjectionFactorExample.cpp +++ b/gtsam_unstable/examples/SmartProjectionFactorExample.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -114,7 +115,7 @@ int main(int argc, char** argv){ Values result = optimizer.optimize(); cout << "Final result sample:" << endl; - Values pose_values = result.filter(); + Values 
pose_values = utilities::allPose3s(result); pose_values.print("Final camera poses:\n"); return 0; diff --git a/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp b/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp index 64afa80309..984637ef08 100644 --- a/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp +++ b/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp @@ -234,9 +234,11 @@ int main(int argc, char** argv) { } } countK = 0; - for(const auto it: result.filter()) - os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1" - << endl; + for (const auto& key_point : result.extract()) { + auto key = key_point.first; + const Point2 point = key_point.second; + os2 << key << "\t" << point.x() << "\t" << point.y() << "\t1" << endl; + } if (smart) { for(size_t jj: ids) { Point2 landmark = smartFactors[jj]->triangulate(result); @@ -257,9 +259,11 @@ int main(int argc, char** argv) { // Write result to file Values result = isam.calculateEstimate(); ofstream os("rangeResult.txt"); - for(const auto it: result.filter()) - os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t" - << it.value.theta() << endl; + for (const auto& key_pose : result.extract()) { + auto key = key_pose.first; + const Pose2 pose = key_pose.second; + os << key << "\t" << pose.x() << "\t" << pose.y() << "\t" << pose.theta() << endl; + } exit(0); } diff --git a/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp b/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp index 95aff85a7f..b7f1fb1e37 100644 --- a/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp +++ b/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp @@ -202,13 +202,17 @@ int main(int argc, char** argv) { // Write result to file Values result = isam.calculateEstimate(); ofstream os2("rangeResultLM.txt"); - for(const auto it: result.filter()) - os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1" - << endl; + for (const auto& key_point : result.extract()) { + auto key = 
key_point.first; + const Point2 point = key_point.second; + os2 << key << "\t" << point.x() << "\t" << point.y() << "\t1" << endl; + } ofstream os("rangeResult.txt"); - for(const auto it: result.filter()) - os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t" - << it.value.theta() << endl; + for (const auto& key_pose : result.extract()) { + auto key = key_pose.first; + const Pose2 pose = key_pose.second; + os << key << "\t" << pose.x() << "\t" << pose.y() << "\t" << pose.theta() << endl; + } exit(0); } diff --git a/gtsam_unstable/examples/SmartStereoProjectionFactorExample.cpp b/gtsam_unstable/examples/SmartStereoProjectionFactorExample.cpp index 2426ca2012..804e4cf5b8 100644 --- a/gtsam_unstable/examples/SmartStereoProjectionFactorExample.cpp +++ b/gtsam_unstable/examples/SmartStereoProjectionFactorExample.cpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -43,6 +44,7 @@ using namespace std; using namespace gtsam; +using symbol_shorthand::X; int main(int argc, char** argv){ @@ -84,16 +86,16 @@ int main(int argc, char** argv){ if(add_initial_noise){ m(1,3) += (pose_id % 10)/10.0; } - initial_estimate.insert(Symbol('x', pose_id), Pose3(m)); + initial_estimate.insert(X(pose_id), Pose3(m)); } - Values initial_pose_values = initial_estimate.filter(); + const auto initialPoses = initial_estimate.extract(); if (output_poses) { init_pose3Out.open(init_poseOutput.c_str(), ios::out); - for (size_t i = 1; i <= initial_pose_values.size(); i++) { + for (size_t i = 1; i <= initialPoses.size(); i++) { init_pose3Out << i << " " - << initial_pose_values.at(Symbol('x', i)).matrix().format( + << initialPoses.at(X(i)).matrix().format( Eigen::IOFormat(Eigen::StreamPrecision, 0, " ", " ")) << endl; } } @@ -103,7 +105,7 @@ int main(int argc, char** argv){ // pixel coordinates uL, uR, v (same for left/right images due to rectification) // landmark coordinates X, Y, Z in camera frame, resulting from triangulation - double uL, uR, v, X, 
Y, Z; + double uL, uR, v, _X, Y, Z; ifstream factor_file(factor_loc.c_str()); cout << "Reading stereo factors" << endl; @@ -112,21 +114,21 @@ int main(int argc, char** argv){ SmartFactor::shared_ptr factor(new SmartFactor(model)); size_t current_l = 3; // hardcoded landmark ID from first measurement - while (factor_file >> x >> l >> uL >> uR >> v >> X >> Y >> Z) { + while (factor_file >> x >> l >> uL >> uR >> v >> _X >> Y >> Z) { if(current_l != l) { graph.push_back(factor); factor = SmartFactor::shared_ptr(new SmartFactor(model)); current_l = l; } - factor->add(StereoPoint2(uL,uR,v), Symbol('x',x), K); + factor->add(StereoPoint2(uL,uR,v), X(x), K); } - Pose3 first_pose = initial_estimate.at(Symbol('x',1)); + Pose3 first_pose = initial_estimate.at(X(1)); //constrain the first pose such that it cannot change from its original value during optimization // NOTE: NonlinearEquality forces the optimizer to use QR rather than Cholesky // QR is much slower than Cholesky, but numerically more stable - graph.emplace_shared >(Symbol('x',1),first_pose); + graph.emplace_shared >(X(1),first_pose); LevenbergMarquardtParams params; params.verbosityLM = LevenbergMarquardtParams::TRYLAMBDA; @@ -138,13 +140,13 @@ int main(int argc, char** argv){ Values result = optimizer.optimize(); cout << "Final result sample:" << endl; - Values pose_values = result.filter(); + Values pose_values = utilities::allPose3s(result); pose_values.print("Final camera poses:\n"); if(output_poses){ pose3Out.open(poseOutput.c_str(),ios::out); for(size_t i = 1; i<=pose_values.size(); i++){ - pose3Out << i << " " << pose_values.at(Symbol('x',i)).matrix().format(Eigen::IOFormat(Eigen::StreamPrecision, 0, + pose3Out << i << " " << pose_values.at(X(i)).matrix().format(Eigen::IOFormat(Eigen::StreamPrecision, 0, " ", " ")) << endl; } cout << "Writing output" << endl; diff --git a/gtsam_unstable/gtsam_unstable.i b/gtsam_unstable/gtsam_unstable.i index c6dbd4ab62..6ce7be20ae 100644 --- 
a/gtsam_unstable/gtsam_unstable.i +++ b/gtsam_unstable/gtsam_unstable.i @@ -21,9 +21,7 @@ virtual class gtsam::noiseModel::Isotropic; virtual class gtsam::imuBias::ConstantBias; virtual class gtsam::NonlinearFactor; virtual class gtsam::NoiseModelFactor; -virtual class gtsam::NoiseModelFactor2; -virtual class gtsam::NoiseModelFactor3; -virtual class gtsam::NoiseModelFactor4; +virtual class gtsam::NoiseModelFactorN; virtual class gtsam::GaussianFactor; virtual class gtsam::HessianFactor; virtual class gtsam::JacobianFactor; @@ -430,8 +428,9 @@ virtual class IMUFactor : gtsam::NoiseModelFactor { Vector gyro() const; Vector accel() const; Vector z() const; - size_t key1() const; - size_t key2() const; + + template + size_t key() const; }; #include @@ -448,8 +447,9 @@ virtual class FullIMUFactor : gtsam::NoiseModelFactor { Vector gyro() const; Vector accel() const; Vector z() const; - size_t key1() const; - size_t key2() const; + + template + size_t key() const; }; #include @@ -733,14 +733,14 @@ class AHRS { // Tectonic SAM Factors #include -//typedef gtsam::NoiseModelFactor2 NLPosePose; +//typedef gtsam::NoiseModelFactorN NLPosePose; virtual class DeltaFactor : gtsam::NoiseModelFactor { DeltaFactor(size_t i, size_t j, const gtsam::Point2& measured, const gtsam::noiseModel::Base* noiseModel); //void print(string s) const; }; -//typedef gtsam::NoiseModelFactor4 NLPosePosePosePoint; virtual class DeltaFactorBase : gtsam::NoiseModelFactor { DeltaFactorBase(size_t b1, size_t i, size_t b2, size_t j, @@ -748,7 +748,7 @@ virtual class DeltaFactorBase : gtsam::NoiseModelFactor { //void print(string s) const; }; -//typedef gtsam::NoiseModelFactor4 NLPosePosePosePose; virtual class OdometryFactorBase : gtsam::NoiseModelFactor { OdometryFactorBase(size_t b1, size_t i, size_t b2, size_t j, diff --git a/gtsam_unstable/linear/tests/testLinearEquality.cpp b/gtsam_unstable/linear/tests/testLinearEquality.cpp index 36a2cacd8e..58af66a090 100644 --- 
a/gtsam_unstable/linear/tests/testLinearEquality.cpp +++ b/gtsam_unstable/linear/tests/testLinearEquality.cpp @@ -15,38 +15,31 @@ * @author Duy-Nguyen Ta **/ -#include +#include #include #include #include -#include - -#include -#include +#include using namespace std; using namespace gtsam; -using namespace boost::assign; -GTSAM_CONCEPT_TESTABLE_INST (LinearEquality) +GTSAM_CONCEPT_TESTABLE_INST(LinearEquality) namespace { namespace simple { // Terms we'll use -const vector > terms = list_of < pair - > (make_pair(5, Matrix3::Identity()))( - make_pair(10, 2 * Matrix3::Identity()))( - make_pair(15, 3 * Matrix3::Identity())); +const vector > terms{ + make_pair(5, I_3x3), make_pair(10, 2 * I_3x3), make_pair(15, 3 * I_3x3)}; // RHS and sigmas const Vector b = (Vector(3) << 1., 2., 3.).finished(); const SharedDiagonal noise = noiseModel::Constrained::All(3); -} -} +} // namespace simple +} // namespace /* ************************************************************************* */ -TEST(LinearEquality, constructors_and_accessors) -{ +TEST(LinearEquality, constructors_and_accessors) { using namespace simple; // Test for using different numbers of terms @@ -66,8 +59,8 @@ TEST(LinearEquality, constructors_and_accessors) // Two term constructor LinearEquality expected( boost::make_iterator_range(terms.begin(), terms.begin() + 2), b, 0); - LinearEquality actual(terms[0].first, terms[0].second, - terms[1].first, terms[1].second, b, 0); + LinearEquality actual(terms[0].first, terms[0].second, terms[1].first, + terms[1].second, b, 0); EXPECT(assert_equal(expected, actual)); LONGS_EQUAL((long)terms[1].first, (long)actual.keys().back()); EXPECT(assert_equal(terms[1].second, actual.getA(actual.end() - 1))); @@ -79,8 +72,9 @@ TEST(LinearEquality, constructors_and_accessors) // Three term constructor LinearEquality expected( boost::make_iterator_range(terms.begin(), terms.begin() + 3), b, 0); - LinearEquality actual(terms[0].first, terms[0].second, - terms[1].first, terms[1].second, 
terms[2].first, terms[2].second, b, 0); + LinearEquality actual(terms[0].first, terms[0].second, terms[1].first, + terms[1].second, terms[2].first, terms[2].second, b, + 0); EXPECT(assert_equal(expected, actual)); LONGS_EQUAL((long)terms[2].first, (long)actual.keys().back()); EXPECT(assert_equal(terms[2].second, actual.getA(actual.end() - 1))); @@ -92,39 +86,38 @@ TEST(LinearEquality, constructors_and_accessors) /* ************************************************************************* */ TEST(LinearEquality, Hessian_conversion) { - HessianFactor hessian(0, (Matrix(4,4) << - 1.57, 2.695, -1.1, -2.35, - 2.695, 11.3125, -0.65, -10.225, - -1.1, -0.65, 1, 0.5, - -2.35, -10.225, 0.5, 9.25).finished(), - (Vector(4) << -7.885, -28.5175, 2.75, 25.675).finished(), - 73.1725); + HessianFactor hessian( + 0, + (Matrix(4, 4) << 1.57, 2.695, -1.1, -2.35, 2.695, 11.3125, -0.65, -10.225, + -1.1, -0.65, 1, 0.5, -2.35, -10.225, 0.5, 9.25) + .finished(), + (Vector(4) << -7.885, -28.5175, 2.75, 25.675).finished(), 73.1725); try { LinearEquality actual(hessian); EXPECT(false); - } - catch (const std::runtime_error& exception) { + } catch (const std::runtime_error& exception) { EXPECT(true); } } /* ************************************************************************* */ -TEST(LinearEquality, error) -{ +TEST(LinearEquality, error) { LinearEquality factor(simple::terms, simple::b, 0); VectorValues values; values.insert(5, Vector::Constant(3, 1.0)); values.insert(10, Vector::Constant(3, 0.5)); - values.insert(15, Vector::Constant(3, 1.0/3.0)); + values.insert(15, Vector::Constant(3, 1.0 / 3.0)); - Vector expected_unwhitened(3); expected_unwhitened << 2.0, 1.0, 0.0; + Vector expected_unwhitened(3); + expected_unwhitened << 2.0, 1.0, 0.0; Vector actual_unwhitened = factor.unweighted_error(values); EXPECT(assert_equal(expected_unwhitened, actual_unwhitened)); // whitened is meaningless in constraints - Vector expected_whitened(3); expected_whitened = expected_unwhitened; + Vector 
expected_whitened(3); + expected_whitened = expected_unwhitened; Vector actual_whitened = factor.error_vector(values); EXPECT(assert_equal(expected_whitened, actual_whitened)); @@ -134,13 +127,13 @@ TEST(LinearEquality, error) } /* ************************************************************************* */ -TEST(LinearEquality, matrices_NULL) -{ +TEST(LinearEquality, matrices_NULL) { // Make sure everything works with nullptr noise model LinearEquality factor(simple::terms, simple::b, 0); Matrix AExpected(3, 9); - AExpected << simple::terms[0].second, simple::terms[1].second, simple::terms[2].second; + AExpected << simple::terms[0].second, simple::terms[1].second, + simple::terms[2].second; Vector rhsExpected = simple::b; Matrix augmentedJacobianExpected(3, 10); augmentedJacobianExpected << AExpected, rhsExpected; @@ -153,24 +146,25 @@ TEST(LinearEquality, matrices_NULL) // Unwhitened Jacobian EXPECT(assert_equal(AExpected, factor.jacobianUnweighted().first)); EXPECT(assert_equal(rhsExpected, factor.jacobianUnweighted().second)); - EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobianUnweighted())); + EXPECT(assert_equal(augmentedJacobianExpected, + factor.augmentedJacobianUnweighted())); } /* ************************************************************************* */ -TEST(LinearEquality, matrices) -{ +TEST(LinearEquality, matrices) { // And now witgh a non-unit noise model LinearEquality factor(simple::terms, simple::b, 0); Matrix jacobianExpected(3, 9); - jacobianExpected << simple::terms[0].second, simple::terms[1].second, simple::terms[2].second; + jacobianExpected << simple::terms[0].second, simple::terms[1].second, + simple::terms[2].second; Vector rhsExpected = simple::b; Matrix augmentedJacobianExpected(3, 10); augmentedJacobianExpected << jacobianExpected, rhsExpected; Matrix augmentedHessianExpected = - augmentedJacobianExpected.transpose() * simple::noise->R().transpose() - * simple::noise->R() * augmentedJacobianExpected; + 
augmentedJacobianExpected.transpose() * simple::noise->R().transpose() * + simple::noise->R() * augmentedJacobianExpected; // Whitened Jacobian EXPECT(assert_equal(jacobianExpected, factor.jacobian().first)); @@ -180,35 +174,37 @@ TEST(LinearEquality, matrices) // Unwhitened Jacobian EXPECT(assert_equal(jacobianExpected, factor.jacobianUnweighted().first)); EXPECT(assert_equal(rhsExpected, factor.jacobianUnweighted().second)); - EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobianUnweighted())); + EXPECT(assert_equal(augmentedJacobianExpected, + factor.augmentedJacobianUnweighted())); } /* ************************************************************************* */ -TEST(LinearEquality, operators ) -{ +TEST(LinearEquality, operators) { Matrix I = I_2x2; - Vector b = (Vector(2) << 0.2,-0.1).finished(); + Vector b = (Vector(2) << 0.2, -0.1).finished(); LinearEquality lf(1, -I, 2, I, b, 0); VectorValues c; - c.insert(1, (Vector(2) << 10.,20.).finished()); - c.insert(2, (Vector(2) << 30.,60.).finished()); + c.insert(1, (Vector(2) << 10., 20.).finished()); + c.insert(2, (Vector(2) << 30., 60.).finished()); // test A*x - Vector expectedE = (Vector(2) << 20.,40.).finished(); + Vector expectedE = (Vector(2) << 20., 40.).finished(); Vector actualE = lf * c; EXPECT(assert_equal(expectedE, actualE)); // test A^e VectorValues expectedX; - expectedX.insert(1, (Vector(2) << -20.,-40.).finished()); + expectedX.insert(1, (Vector(2) << -20., -40.).finished()); expectedX.insert(2, (Vector(2) << 20., 40.).finished()); VectorValues actualX = VectorValues::Zero(expectedX); lf.transposeMultiplyAdd(1.0, actualE, actualX); EXPECT(assert_equal(expectedX, actualX)); // test gradient at zero - Matrix A; Vector b2; boost::tie(A,b2) = lf.jacobian(); + Matrix A; + Vector b2; + boost::tie(A, b2) = lf.jacobian(); VectorValues expectedG; expectedG.insert(1, (Vector(2) << 0.2, -0.1).finished()); expectedG.insert(2, (Vector(2) << -0.2, 0.1).finished()); @@ -217,16 +213,14 @@ 
TEST(LinearEquality, operators ) } /* ************************************************************************* */ -TEST(LinearEquality, default_error ) -{ +TEST(LinearEquality, default_error) { LinearEquality f; double actual = f.error(VectorValues()); DOUBLES_EQUAL(0.0, actual, 1e-15); } //* ************************************************************************* */ -TEST(LinearEquality, empty ) -{ +TEST(LinearEquality, empty) { // create an empty factor LinearEquality f; EXPECT(f.empty()); diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp index fd18e7c6dc..f5280ceff4 100644 --- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp +++ b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp @@ -59,8 +59,8 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update( // Add the new variables to theta theta_.insert(newTheta); // Add new variables to the end of the ordering - for (const auto key_value : newTheta) { - ordering_.push_back(key_value.key); + for (const auto key : newTheta.keys()) { + ordering_.push_back(key); } // Augment Delta delta_.insert(newTheta.zeroVectors()); @@ -161,8 +161,8 @@ void BatchFixedLagSmoother::eraseKeys(const KeyVector& keys) { factorIndex_.erase(key); // Erase the key from the set of linearized keys - if (linearKeys_.exists(key)) { - linearKeys_.erase(key); + if (linearValues_.exists(key)) { + linearValues_.erase(key); } } @@ -186,8 +186,8 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() { // Create output result structure Result result; - result.nonlinearVariables = theta_.size() - linearKeys_.size(); - result.linearVariables = linearKeys_.size(); + result.nonlinearVariables = theta_.size() - linearValues_.size(); + result.linearVariables = linearValues_.size(); // Set optimization parameters double lambda = parameters_.lambdaInitial; @@ -265,10 +265,10 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() { // Reset the deltas to zeros 
delta_.setZero(); // Put the linearization points and deltas back for specific variables - if (enforceConsistency_ && (linearKeys_.size() > 0)) { - theta_.update(linearKeys_); - for(const auto key_value: linearKeys_) { - delta_.at(key_value.key) = newDelta.at(key_value.key); + if (enforceConsistency_ && (linearValues_.size() > 0)) { + theta_.update(linearValues_); + for(const auto key: linearValues_.keys()) { + delta_.at(key) = newDelta.at(key); } } // Decrease lambda for next time @@ -282,9 +282,12 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() { // Reject this change if (lambda >= lambdaUpperBound) { // The maximum lambda has been used. Print a warning and end the search. - cout + if(parameters_.verbosity >= NonlinearOptimizerParams::TERMINATION + || parameters_.verbosityLM == LevenbergMarquardtParams::SUMMARY) { + cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; + } break; } else { // Increase lambda and continue searching diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h index 94cf130cfa..79bd22f9da 100644 --- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h +++ b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h @@ -136,8 +136,8 @@ class GTSAM_UNSTABLE_EXPORT BatchFixedLagSmoother : public FixedLagSmoother { /** The current linearization point **/ Values theta_; - /** The set of keys involved in current linear factors. These keys should not be relinearized. **/ - Values linearKeys_; + /** The set of values involved in current linear factors. 
**/ + Values linearValues_; /** The current ordering */ Ordering ordering_; diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp index 83d0ab719f..a31a759205 100644 --- a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp @@ -139,8 +139,8 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::update(const NonlinearFacto // Add the new variables to theta theta_.insert(newTheta); // Add new variables to the end of the ordering - for(const auto key_value: newTheta) { - ordering_.push_back(key_value.key); + for (const auto key : newTheta.keys()) { + ordering_.push_back(key); } // Augment Delta delta_.insert(newTheta.zeroVectors()); @@ -221,10 +221,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm if(debug) { PrintNonlinearFactorGraph(smootherSummarization_, "ConcurrentBatchFilter::synchronize ", "Updated Smoother Summarization:", DefaultKeyFormatter); } // Find the set of new separator keys - KeySet newSeparatorKeys; - for(const auto key_value: separatorValues_) { - newSeparatorKeys.insert(key_value.key); - } + const KeySet newSeparatorKeys = separatorValues_.keySet(); if(debug) { PrintKeys(newSeparatorKeys, "ConcurrentBatchFilter::synchronize ", "Current Separator Keys:"); } @@ -236,9 +233,9 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm graph.push_back(smootherShortcut_); Values values; values.insert(smootherSummarizationValues); - for(const auto key_value: separatorValues_) { - if(!values.exists(key_value.key)) { - values.insert(key_value.key, key_value.value); + for(const auto key: newSeparatorKeys) { + if(!values.exists(key)) { + values.insert(key, separatorValues_.at(key)); } } // Calculate the summarized factor on just the new separator keys @@ -471,8 +468,8 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values // Put the linearization 
points and deltas back for specific variables if(linearValues.size() > 0) { theta.update(linearValues); - for(const auto key_value: linearValues) { - delta.at(key_value.key) = newDelta.at(key_value.key); + for(const auto key: linearValues.keys()) { + delta.at(key) = newDelta.at(key); } } @@ -574,9 +571,7 @@ void ConcurrentBatchFilter::moveSeparator(const FastList& keysToMove) { // Calculate the set of new separator keys: AffectedKeys + PreviousSeparatorKeys - KeysToMove KeySet newSeparatorKeys = removedFactors.keys(); - for(const auto key_value: separatorValues_) { - newSeparatorKeys.insert(key_value.key); - } + newSeparatorKeys.merge(separatorValues_.keySet()); for(Key key: keysToMove) { newSeparatorKeys.erase(key); } diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp index 75d491bde7..94a7d4c220 100644 --- a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp @@ -61,8 +61,8 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF theta_.insert(newTheta); // Add new variables to the end of the ordering - for(const auto key_value: newTheta) { - ordering_.push_back(key_value.key); + for(const auto key: newTheta.keys()) { + ordering_.push_back(key); } // Augment Delta @@ -135,26 +135,30 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa removeFactors(filterSummarizationSlots_); // Insert new linpoints into the values, augment the ordering, and store new dims to augment delta - for(const auto key_value: smootherValues) { - std::pair iter_inserted = theta_.tryInsert(key_value.key, key_value.value); - if(iter_inserted.second) { - // If the insert succeeded - ordering_.push_back(key_value.key); - delta_.insert(key_value.key, Vector::Zero(key_value.value.dim())); + for(const auto key: smootherValues.keys()) { + if(!theta_.exists(key)) { + // If this a new key for theta_, also add to 
ordering and delta. + const auto& value = smootherValues.at(key); + delta_.insert(key, Vector::Zero(value.dim())); + theta_.insert(key, value); + ordering_.push_back(key); } else { - // If the element already existed in theta_ - iter_inserted.first->value = key_value.value; + // If the key already existed in theta_, just update. + const auto& value = smootherValues.at(key); + theta_.update(key, value); } } - for(const auto key_value: separatorValues) { - std::pair iter_inserted = theta_.tryInsert(key_value.key, key_value.value); - if(iter_inserted.second) { - // If the insert succeeded - ordering_.push_back(key_value.key); - delta_.insert(key_value.key, Vector::Zero(key_value.value.dim())); + for(const auto key: separatorValues.keys()) { + if(!theta_.exists(key)) { + // If this a new key for theta_, also add to ordering and delta. + const auto& value = separatorValues.at(key); + delta_.insert(key, Vector::Zero(value.dim())); + theta_.insert(key, value); + ordering_.push_back(key); } else { - // If the element already existed in theta_ - iter_inserted.first->value = key_value.value; + // If the key already existed in theta_, just update. 
+ const auto& value = separatorValues.at(key); + theta_.update(key, value); } } @@ -322,8 +326,8 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() { // Put the linearization points and deltas back for specific variables if(separatorValues_.size() > 0) { theta_.update(separatorValues_); - for(const auto key_value: separatorValues_) { - delta_.at(key_value.key) = newDelta.at(key_value.key); + for(const auto key: separatorValues_.keys()) { + delta_.at(key) = newDelta.at(key); } } @@ -371,10 +375,7 @@ void ConcurrentBatchSmoother::updateSmootherSummarization() { } // Get the set of separator keys - gtsam::KeySet separatorKeys; - for(const auto key_value: separatorValues_) { - separatorKeys.insert(key_value.key); - } + const KeySet separatorKeys = separatorValues_.keySet(); // Calculate the marginal factors on the separator smootherSummarization_ = internal::calculateMarginalFactors(graph, theta_, separatorKeys, parameters_.getEliminationFunction()); diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp index b9cf66a976..947c02cc02 100644 --- a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp @@ -69,14 +69,14 @@ ConcurrentIncrementalFilter::Result ConcurrentIncrementalFilter::update(const No int group = 1; // Set all existing variables to Group1 if(isam2_.getLinearizationPoint().size() > 0) { - for(const auto key_value: isam2_.getLinearizationPoint()) { - orderingConstraints->operator[](key_value.key) = group; + for(const auto key: isam2_.getLinearizationPoint().keys()) { + orderingConstraints->operator[](key) = group; } ++group; } // Assign new variables to the root - for(const auto key_value: newTheta) { - orderingConstraints->operator[](key_value.key) = group; + for(const auto key: newTheta.keys()) { + orderingConstraints->operator[](key) = group; } // Set marginalizable variables to Group0 for(Key key: 
*keysToMove){ @@ -201,8 +201,8 @@ void ConcurrentIncrementalFilter::synchronize(const NonlinearFactorGraph& smooth // Force iSAM2 not to relinearize anything during this iteration FastList noRelinKeys; - for(const auto key_value: isam2_.getLinearizationPoint()) { - noRelinKeys.push_back(key_value.key); + for(const auto key: isam2_.getLinearizationPoint().keys()) { + noRelinKeys.push_back(key); } // Calculate the summarized factor on just the new separator keys diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp index 3886d0e422..b82b080482 100644 --- a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp @@ -66,9 +66,9 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons // Also, mark the separator keys as fixed linearization points FastMap constrainedKeys; FastList noRelinKeys; - for(const auto key_value: separatorValues_) { - constrainedKeys[key_value.key] = 1; - noRelinKeys.push_back(key_value.key); + for (const auto key : separatorValues_.keys()) { + constrainedKeys[key] = 1; + noRelinKeys.push_back(key); } // Use iSAM2 to perform an update @@ -82,14 +82,14 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons Values values(newTheta); // Unfortunately, we must be careful here, as some of the smoother values // and/or separator values may have been added previously - for(const auto key_value: smootherValues_) { - if(!isam2_.getLinearizationPoint().exists(key_value.key)) { - values.insert(key_value.key, smootherValues_.at(key_value.key)); + for(const auto key: smootherValues_.keys()) { + if(!isam2_.getLinearizationPoint().exists(key)) { + values.insert(key, smootherValues_.at(key)); } } - for(const auto key_value: separatorValues_) { - if(!isam2_.getLinearizationPoint().exists(key_value.key)) { - values.insert(key_value.key, 
separatorValues_.at(key_value.key)); + for(const auto key: separatorValues_.keys()) { + if(!isam2_.getLinearizationPoint().exists(key)) { + values.insert(key, separatorValues_.at(key)); } } @@ -245,10 +245,7 @@ void ConcurrentIncrementalSmoother::updateSmootherSummarization() { } // Get the set of separator keys - KeySet separatorKeys; - for(const auto key_value: separatorValues_) { - separatorKeys.insert(key_value.key); - } + const KeySet separatorKeys = separatorValues_.keySet(); // Calculate the marginal factors on the separator smootherSummarization_ = internal::calculateMarginalFactors(graph, isam2_.getLinearizationPoint(), separatorKeys, diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp index 61db051673..15038c23fc 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp @@ -64,8 +64,8 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph, std::set KeysToKeep; - for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph - KeysToKeep.insert(key_value.key); + for(const auto key: linPoint.keys()) { // we cycle over all the keys of factorGraph + KeysToKeep.insert(key); } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize for(Key key: keysToMarginalize) { KeysToKeep.erase(key); diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp index b5fb614285..a2733d509c 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp @@ -560,8 +560,8 @@ TEST( ConcurrentBatchSmoother, synchronize_3 ) GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues); KeySet eliminateKeys = linearFactors->keys(); - for(const auto 
key_value: filterSeparatorValues) { - eliminateKeys.erase(key_value.key); + for(const auto key: filterSeparatorValues.keys()) { + eliminateKeys.erase(key); } KeyVector variables(eliminateKeys.begin(), eliminateKeys.end()); GaussianFactorGraph result = *linearFactors->eliminatePartialMultifrontal(variables, EliminateCholesky).second; diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp index 08d71a4202..7c6a082781 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp @@ -80,8 +80,8 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph, std::set KeysToKeep; - for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph - KeysToKeep.insert(key_value.key); + for(const auto key: linPoint.keys()) { // we cycle over all the keys of factorGraph + KeysToKeep.insert(key); } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize for(Key key: keysToMarginalize) { KeysToKeep.erase(key); diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp index ccb5a104e3..0e01112eb5 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp @@ -512,8 +512,8 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_2 ) // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1); Values expectedLinearizationPoint = filterSeparatorValues; Values actualLinearizationPoint; - for(const auto key_value: filterSeparatorValues) { - actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key)); + for(const auto key: filterSeparatorValues.keys()) { + 
actualLinearizationPoint.insert(key, smoother.getLinearizationPoint().at(key)); } CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6)); } @@ -580,8 +580,8 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_3 ) // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate(); KeySet allkeys = LinFactorGraph->keys(); - for(const auto key_value: filterSeparatorValues) { - allkeys.erase(key_value.key); + for(const auto key: filterSeparatorValues.keys()) { + allkeys.erase(key); } KeyVector variables(allkeys.begin(), allkeys.end()); std::pair result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky); diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp index 53c3d16103..a33fcc4811 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp @@ -513,8 +513,8 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_2 ) // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1); Values expectedLinearizationPoint = filterSeparatorValues; Values actualLinearizationPoint; - for(const auto key_value: filterSeparatorValues) { - actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key)); + for(const auto key: filterSeparatorValues.keys()) { + actualLinearizationPoint.insert(key, smoother.getLinearizationPoint().at(key)); } CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6)); } @@ -582,8 +582,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_3 ) // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate(); KeySet allkeys = LinFactorGraph->keys(); - for(const auto key_value: filterSeparatorValues) - allkeys.erase(key_value.key); + for (const auto key : filterSeparatorValues.keys()) allkeys.erase(key); KeyVector variables(allkeys.begin(), 
allkeys.end()); std::pair result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky); diff --git a/gtsam_unstable/partition/tests/testFindSeparator.cpp b/gtsam_unstable/partition/tests/testFindSeparator.cpp index fe49de9289..49796f80d0 100644 --- a/gtsam_unstable/partition/tests/testFindSeparator.cpp +++ b/gtsam_unstable/partition/tests/testFindSeparator.cpp @@ -6,10 +6,6 @@ * Description: unit tests for FindSeparator */ -#include // for operator += -#include // for operator += -#include // for operator += -using namespace boost::assign; #include #include @@ -30,16 +26,16 @@ TEST ( Partition, separatorPartitionByMetis ) graph.push_back(boost::make_shared(2, NODE_POSE_2D, 4, NODE_LANDMARK_2D)); graph.push_back(boost::make_shared(0, NODE_POSE_2D, 1, NODE_POSE_2D)); graph.push_back(boost::make_shared(1, NODE_POSE_2D, 2, NODE_POSE_2D)); - std::vector keys; keys += 0, 1, 2, 3, 4; + std::vector keys{0, 1, 2, 3, 4}; WorkSpace workspace(5); boost::optional actual = separatorPartitionByMetis(graph, keys, workspace, true); CHECK(actual.is_initialized()); - vector A_expected; A_expected += 0, 3; // frontal - vector B_expected; B_expected += 2, 4; // frontal - vector C_expected; C_expected += 1; // separator + vector A_expected{0, 3}; // frontal + vector B_expected{2, 4}; // frontal + vector C_expected{1}; // separator CHECK(A_expected == actual->A); CHECK(B_expected == actual->B); CHECK(C_expected == actual->C); @@ -55,16 +51,16 @@ TEST ( Partition, separatorPartitionByMetis2 ) graph.push_back(boost::make_shared(3, NODE_POSE_2D, 6, NODE_LANDMARK_2D)); graph.push_back(boost::make_shared(1, NODE_POSE_2D, 2, NODE_POSE_2D)); graph.push_back(boost::make_shared(2, NODE_POSE_2D, 3, NODE_POSE_2D)); - std::vector keys; keys += 1, 2, 3, 5, 6; + std::vector keys{1, 2, 3, 5, 6}; WorkSpace workspace(8); boost::optional actual = separatorPartitionByMetis(graph, keys, workspace, true); CHECK(actual.is_initialized()); - vector A_expected; A_expected += 1, 5; // frontal 
- vector B_expected; B_expected += 3, 6; // frontal - vector C_expected; C_expected += 2; // separator + vector A_expected{1, 5}; // frontal + vector B_expected{3, 6}; // frontal + vector C_expected{2}; // separator CHECK(A_expected == actual->A); CHECK(B_expected == actual->B); CHECK(C_expected == actual->C); @@ -78,15 +74,15 @@ TEST ( Partition, edgePartitionByMetis ) graph.push_back(boost::make_shared(0, 1, 0, NODE_POSE_3D, NODE_POSE_3D)); graph.push_back(boost::make_shared(1, 2, 1, NODE_POSE_3D, NODE_POSE_3D)); graph.push_back(boost::make_shared(2, 3, 2, NODE_POSE_3D, NODE_POSE_3D)); - std::vector keys; keys += 0, 1, 2, 3; + std::vector keys{0, 1, 2, 3}; WorkSpace workspace(6); boost::optional actual = edgePartitionByMetis(graph, keys, workspace, true); CHECK(actual.is_initialized()); - vector A_expected; A_expected += 0, 1; // frontal - vector B_expected; B_expected += 2, 3; // frontal + vector A_expected{0, 1}; // frontal + vector B_expected{2, 3}; // frontal vector C_expected; // separator // for(const size_t a: actual->A) // cout << a << " "; @@ -109,14 +105,14 @@ TEST ( Partition, edgePartitionByMetis2 ) graph.push_back(boost::make_shared(1, 2, 1, NODE_POSE_3D, NODE_POSE_3D, 1)); graph.push_back(boost::make_shared(2, 3, 2, NODE_POSE_3D, NODE_POSE_3D, 20)); graph.push_back(boost::make_shared(3, 4, 3, NODE_POSE_3D, NODE_POSE_3D, 1)); - std::vector keys; keys += 0, 1, 2, 3, 4; + std::vector keys{0, 1, 2, 3, 4}; WorkSpace workspace(6); boost::optional actual = edgePartitionByMetis(graph, keys, workspace, true); CHECK(actual.is_initialized()); - vector A_expected; A_expected += 0, 1; // frontal - vector B_expected; B_expected += 2, 3, 4; // frontal + vector A_expected{0, 1}; // frontal + vector B_expected{2, 3, 4}; // frontal vector C_expected; // separator CHECK(A_expected == actual->A); CHECK(B_expected == actual->B); @@ -133,7 +129,7 @@ TEST ( Partition, findSeparator ) graph.push_back(boost::make_shared(2, NODE_POSE_2D, 4, NODE_LANDMARK_2D)); 
graph.push_back(boost::make_shared(0, NODE_POSE_2D, 1, NODE_POSE_2D)); graph.push_back(boost::make_shared(1, NODE_POSE_2D, 2, NODE_POSE_2D)); - std::vector keys; keys += 0, 1, 2, 3, 4; + std::vector keys{0, 1, 2, 3, 4}; WorkSpace workspace(5); int minNodesPerMap = -1; @@ -159,7 +155,7 @@ TEST ( Partition, findSeparator2 ) graph.push_back(boost::make_shared(3, NODE_POSE_2D, 6, NODE_LANDMARK_2D)); graph.push_back(boost::make_shared(1, NODE_POSE_2D, 2, NODE_POSE_2D)); graph.push_back(boost::make_shared(2, NODE_POSE_2D, 3, NODE_POSE_2D)); - std::vector keys; keys += 1, 2, 3, 5, 6; + std::vector keys{1, 2, 3, 5, 6}; WorkSpace workspace(8); int minNodesPerMap = -1; diff --git a/gtsam_unstable/partition/tests/testGenericGraph.cpp b/gtsam_unstable/partition/tests/testGenericGraph.cpp index 8a32837f4c..dc7d260351 100644 --- a/gtsam_unstable/partition/tests/testGenericGraph.cpp +++ b/gtsam_unstable/partition/tests/testGenericGraph.cpp @@ -10,10 +10,6 @@ #include -#include // for operator += -#include // for operator += -#include // for operator += -using namespace boost::assign; #include #include @@ -44,13 +40,13 @@ TEST ( GenerciGraph, findIslands ) graph.push_back(boost::make_shared(2, NODE_POSE_2D, 3, NODE_POSE_2D)); graph.push_back(boost::make_shared(4, NODE_POSE_2D, 5, NODE_POSE_2D)); graph.push_back(boost::make_shared(5, NODE_POSE_2D, 6, NODE_POSE_2D)); - std::vector keys; keys += 1, 2, 3, 4, 5, 6, 7, 8, 9; + std::vector keys{1, 2, 3, 4, 5, 6, 7, 8, 9}; WorkSpace workspace(10); // from 0 to 9 list > islands = findIslands(graph, keys, workspace, 7, 2); LONGS_EQUAL(2, islands.size()); - vector island1; island1 += 1, 2, 3, 7, 8; - vector island2; island2 += 4, 5, 6, 9; + vector island1{1, 2, 3, 7, 8}; + vector island2{4, 5, 6, 9}; CHECK(island1 == islands.front()); CHECK(island2 == islands.back()); } @@ -77,12 +73,12 @@ TEST( GenerciGraph, findIslands2 ) graph.push_back(boost::make_shared(2, NODE_POSE_2D, 3, NODE_POSE_2D)); graph.push_back(boost::make_shared(4, 
NODE_POSE_2D, 5, NODE_POSE_2D)); graph.push_back(boost::make_shared(5, NODE_POSE_2D, 6, NODE_POSE_2D)); - std::vector keys; keys += 1, 2, 3, 4, 5, 6, 7, 8; + std::vector keys{1, 2, 3, 4, 5, 6, 7, 8}; WorkSpace workspace(15); // from 0 to 8, but testing over-allocation here list > islands = findIslands(graph, keys, workspace, 7, 2); LONGS_EQUAL(1, islands.size()); - vector island1; island1 += 1, 2, 3, 4, 5, 6, 7, 8; + vector island1{1, 2, 3, 4, 5, 6, 7, 8}; CHECK(island1 == islands.front()); } @@ -99,13 +95,13 @@ TEST ( GenerciGraph, findIslands3 ) graph.push_back(boost::make_shared(2, NODE_POSE_2D, 3, NODE_POSE_2D)); graph.push_back(boost::make_shared(3, NODE_POSE_2D, 4, NODE_POSE_2D)); - std::vector keys; keys += 1, 2, 3, 4, 5, 6; + std::vector keys{1, 2, 3, 4, 5, 6}; WorkSpace workspace(7); // from 0 to 9 list > islands = findIslands(graph, keys, workspace, 7, 2); LONGS_EQUAL(2, islands.size()); - vector island1; island1 += 1, 5; - vector island2; island2 += 2, 3, 4, 6; + vector island1{1, 5}; + vector island2{2, 3, 4, 6}; CHECK(island1 == islands.front()); CHECK(island2 == islands.back()); } @@ -119,13 +115,13 @@ TEST ( GenerciGraph, findIslands4 ) GenericGraph2D graph; graph.push_back(boost::make_shared(3, NODE_POSE_2D, 4, NODE_LANDMARK_2D)); graph.push_back(boost::make_shared(7, NODE_POSE_2D, 7, NODE_LANDMARK_2D)); - std::vector keys; keys += 3, 4, 7; + std::vector keys{3, 4, 7}; WorkSpace workspace(8); // from 0 to 7 list > islands = findIslands(graph, keys, workspace, 7, 2); LONGS_EQUAL(2, islands.size()); - vector island1; island1 += 3, 4; - vector island2; island2 += 7; + vector island1{3, 4}; + vector island2{7}; CHECK(island1 == islands.front()); CHECK(island2 == islands.back()); } @@ -147,13 +143,13 @@ TEST ( GenerciGraph, findIslands5 ) graph.push_back(boost::make_shared(1, NODE_POSE_2D, 3, NODE_POSE_2D)); graph.push_back(boost::make_shared(2, NODE_POSE_2D, 4, NODE_POSE_2D)); - std::vector keys; keys += 1, 2, 3, 4, 5; + std::vector keys{1, 2, 3, 4, 5}; 
WorkSpace workspace(6); // from 0 to 5 list > islands = findIslands(graph, keys, workspace, 7, 2); LONGS_EQUAL(2, islands.size()); - vector island1; island1 += 1, 3, 5; - vector island2; island2 += 2, 4; + vector island1{1, 3, 5}; + vector island2{2, 4}; CHECK(island1 == islands.front()); CHECK(island2 == islands.back()); } @@ -235,8 +231,8 @@ TEST ( GenericGraph, reduceGenericGraph2 ) /* ************************************************************************* */ TEST ( GenerciGraph, hasCommonCamera ) { - std::set cameras1; cameras1 += 1, 2, 3, 4, 5; - std::set cameras2; cameras2 += 8, 7, 6, 5; + std::set cameras1{1, 2, 3, 4, 5}; + std::set cameras2{8, 7, 6, 5}; bool actual = hasCommonCamera(cameras1, cameras2); CHECK(actual); } @@ -244,8 +240,8 @@ TEST ( GenerciGraph, hasCommonCamera ) /* ************************************************************************* */ TEST ( GenerciGraph, hasCommonCamera2 ) { - std::set cameras1; cameras1 += 1, 3, 5, 7; - std::set cameras2; cameras2 += 2, 4, 6, 8, 10; + std::set cameras1{1, 3, 5, 7}; + std::set cameras2{2, 4, 6, 8, 10}; bool actual = hasCommonCamera(cameras1, cameras2); CHECK(!actual); } diff --git a/gtsam_unstable/partition/tests/testNestedDissection.cpp b/gtsam_unstable/partition/tests/testNestedDissection.cpp index acaa5557e0..36ce647c30 100644 --- a/gtsam_unstable/partition/tests/testNestedDissection.cpp +++ b/gtsam_unstable/partition/tests/testNestedDissection.cpp @@ -6,10 +6,6 @@ * Description: unit tests for NestedDissection */ -#include // for operator += -#include // for operator += -#include // for operator += -using namespace boost::assign; #include #include diff --git a/gtsam_unstable/slam/BetweenFactorEM.h b/gtsam_unstable/slam/BetweenFactorEM.h index 848abc9cc5..ea1ce0d431 100644 --- a/gtsam_unstable/slam/BetweenFactorEM.h +++ b/gtsam_unstable/slam/BetweenFactorEM.h @@ -73,7 +73,7 @@ class BetweenFactorEM: public NonlinearFactor { const SharedGaussian& model_inlier, const SharedGaussian& model_outlier, 
const double prior_inlier, const double prior_outlier, const bool flag_bump_up_near_zero_probs = false) : - Base(cref_list_of<2>(key1)(key2)), key1_(key1), key2_(key2), measured_( + Base(KeyVector{key1, key2}), key1_(key1), key2_(key2), measured_( measured), model_inlier_(model_inlier), model_outlier_(model_outlier), prior_inlier_( prior_inlier), prior_outlier_(prior_outlier), flag_bump_up_near_zero_probs_( flag_bump_up_near_zero_probs) { diff --git a/gtsam_unstable/slam/BiasedGPSFactor.h b/gtsam_unstable/slam/BiasedGPSFactor.h index 250c15b830..07706a6a5d 100644 --- a/gtsam_unstable/slam/BiasedGPSFactor.h +++ b/gtsam_unstable/slam/BiasedGPSFactor.h @@ -27,12 +27,12 @@ namespace gtsam { * common-mode errors and that can be partially corrected if other sensors are used * @ingroup slam */ - class BiasedGPSFactor: public NoiseModelFactor2 { + class BiasedGPSFactor: public NoiseModelFactorN { private: typedef BiasedGPSFactor This; - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; Point3 measured_; /** The measurement */ @@ -57,8 +57,8 @@ namespace gtsam { /** print */ void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "BiasedGPSFactor(" - << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << ")\n" + << keyFormatter(this->key<1>()) << "," + << keyFormatter(this->key<2>()) << ")\n" << " measured: " << measured_.transpose() << std::endl; this->noiseModel_->print(" noise model: "); } @@ -78,9 +78,9 @@ namespace gtsam { if (H1 || H2){ H1->resize(3,6); // jacobian wrt pose - (*H1) << Matrix3::Zero(), pose.rotation().matrix(); + (*H1) << Z_3x3, pose.rotation().matrix(); H2->resize(3,3); // jacobian wrt bias - (*H2) << Matrix3::Identity(); + (*H2) << I_3x3; } return pose.translation() + bias - measured_; } @@ -96,6 +96,7 @@ namespace gtsam { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // 
NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor2", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(measured_); diff --git a/gtsam_unstable/slam/DummyFactor.cpp b/gtsam_unstable/slam/DummyFactor.cpp index df6b1e50dc..f1897cf855 100644 --- a/gtsam_unstable/slam/DummyFactor.cpp +++ b/gtsam_unstable/slam/DummyFactor.cpp @@ -7,15 +7,11 @@ #include -#include - -using namespace boost::assign; - namespace gtsam { /* ************************************************************************* */ DummyFactor::DummyFactor(const Key& key1, size_t dim1, const Key& key2, size_t dim2) -: NonlinearFactor(cref_list_of<2>(key1)(key2)) +: NonlinearFactor(KeyVector{key1, key2}) { dims_.push_back(dim1); dims_.push_back(dim2); diff --git a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h index cabbfdbe85..e52837fb4e 100644 --- a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h +++ b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h @@ -88,12 +88,12 @@ namespace gtsam { */ template -class EquivInertialNavFactor_GlobalVel : public NoiseModelFactor5 { +class EquivInertialNavFactor_GlobalVel : public NoiseModelFactorN { private: typedef EquivInertialNavFactor_GlobalVel This; - typedef NoiseModelFactor5 Base; + typedef NoiseModelFactorN Base; Vector delta_pos_in_t0_; Vector delta_vel_in_t0_; diff --git a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel_NoBias.h b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel_NoBias.h index b053b13f82..6423fabda3 100644 --- a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel_NoBias.h +++ b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel_NoBias.h @@ -87,12 +87,12 @@ namespace gtsam { */ template -class EquivInertialNavFactor_GlobalVel_NoBias : public NoiseModelFactor4 { +class EquivInertialNavFactor_GlobalVel_NoBias : public NoiseModelFactorN { private: typedef 
EquivInertialNavFactor_GlobalVel_NoBias This; - typedef NoiseModelFactor4 Base; + typedef NoiseModelFactorN Base; Vector delta_pos_in_t0_; Vector delta_vel_in_t0_; @@ -136,10 +136,10 @@ class EquivInertialNavFactor_GlobalVel_NoBias : public NoiseModelFactor4key1()) << "," - << keyFormatter(this->key2()) << "," - << keyFormatter(this->key3()) << "," - << keyFormatter(this->key4()) << "\n"; + << keyFormatter(this->key<1>()) << "," + << keyFormatter(this->key<2>()) << "," + << keyFormatter(this->key<3>()) << "," + << keyFormatter(this->key<4>()) << "\n"; std::cout << "delta_pos_in_t0: " << this->delta_pos_in_t0_.transpose() << std::endl; std::cout << "delta_vel_in_t0: " << this->delta_vel_in_t0_.transpose() << std::endl; std::cout << "delta_angles: " << this->delta_angles_ << std::endl; diff --git a/gtsam_unstable/slam/GaussMarkov1stOrderFactor.h b/gtsam_unstable/slam/GaussMarkov1stOrderFactor.h index 52e4f44cb3..5d677b82e6 100644 --- a/gtsam_unstable/slam/GaussMarkov1stOrderFactor.h +++ b/gtsam_unstable/slam/GaussMarkov1stOrderFactor.h @@ -42,12 +42,12 @@ namespace gtsam { * T is the measurement type, by default the same */ template -class GaussMarkov1stOrderFactor: public NoiseModelFactor2 { +class GaussMarkov1stOrderFactor: public NoiseModelFactorN { private: typedef GaussMarkov1stOrderFactor This; - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; double dt_; Vector tau_; diff --git a/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h b/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h index 0828fbd088..fd3ab729d1 100644 --- a/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h +++ b/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h @@ -77,12 +77,12 @@ namespace gtsam { * vehicle */ template -class InertialNavFactor_GlobalVelocity : public NoiseModelFactor5 { +class InertialNavFactor_GlobalVelocity : public NoiseModelFactorN { private: typedef InertialNavFactor_GlobalVelocity This; - typedef NoiseModelFactor5 Base; + typedef 
NoiseModelFactorN Base; Vector measurement_acc_; Vector measurement_gyro_; diff --git a/gtsam_unstable/slam/InvDepthFactor3.h b/gtsam_unstable/slam/InvDepthFactor3.h index 44d3b8fd04..2d0d57437f 100644 --- a/gtsam_unstable/slam/InvDepthFactor3.h +++ b/gtsam_unstable/slam/InvDepthFactor3.h @@ -24,7 +24,7 @@ namespace gtsam { * Ternary factor representing a visual measurement that includes inverse depth */ template -class InvDepthFactor3: public NoiseModelFactor3 { +class InvDepthFactor3: public NoiseModelFactorN { protected: // Keep a copy of measurement and calibration for I/O @@ -34,7 +34,7 @@ class InvDepthFactor3: public NoiseModelFactor3 { public: /// shorthand for base class type - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; /// shorthand for this class typedef InvDepthFactor3 This; diff --git a/gtsam_unstable/slam/InvDepthFactorVariant1.h b/gtsam_unstable/slam/InvDepthFactorVariant1.h index 40c09416c8..a41a06ccde 100644 --- a/gtsam_unstable/slam/InvDepthFactorVariant1.h +++ b/gtsam_unstable/slam/InvDepthFactorVariant1.h @@ -24,7 +24,7 @@ namespace gtsam { /** * Binary factor representing a visual measurement using an inverse-depth parameterization */ -class InvDepthFactorVariant1: public NoiseModelFactor2 { +class InvDepthFactorVariant1: public NoiseModelFactorN { protected: // Keep a copy of measurement and calibration for I/O @@ -34,7 +34,7 @@ class InvDepthFactorVariant1: public NoiseModelFactor2 { public: /// shorthand for base class type - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; /// shorthand for this class typedef InvDepthFactorVariant1 This; @@ -93,8 +93,8 @@ class InvDepthFactorVariant1: public NoiseModelFactor2 { return camera.project(world_P_landmark) - measured_; } catch( CheiralityException& e) { std::cout << e.what() - << ": Inverse Depth Landmark [" << DefaultKeyFormatter(this->key2()) << "]" - << " moved behind camera [" << DefaultKeyFormatter(this->key1()) <<"]" + << ": Inverse Depth Landmark [" 
<< DefaultKeyFormatter(this->key<2>()) << "]" + << " moved behind camera [" << DefaultKeyFormatter(this->key<1>()) <<"]" << std::endl; return Vector::Ones(2) * 2.0 * K_->fx(); } diff --git a/gtsam_unstable/slam/InvDepthFactorVariant2.h b/gtsam_unstable/slam/InvDepthFactorVariant2.h index b1169580e3..d120eff321 100644 --- a/gtsam_unstable/slam/InvDepthFactorVariant2.h +++ b/gtsam_unstable/slam/InvDepthFactorVariant2.h @@ -25,7 +25,7 @@ namespace gtsam { /** * Binary factor representing a visual measurement using an inverse-depth parameterization */ -class InvDepthFactorVariant2: public NoiseModelFactor2 { +class InvDepthFactorVariant2: public NoiseModelFactorN { protected: // Keep a copy of measurement and calibration for I/O @@ -36,7 +36,7 @@ class InvDepthFactorVariant2: public NoiseModelFactor2 { public: /// shorthand for base class type - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; /// shorthand for this class typedef InvDepthFactorVariant2 This; @@ -96,8 +96,8 @@ class InvDepthFactorVariant2: public NoiseModelFactor2 { return camera.project(world_P_landmark) - measured_; } catch( CheiralityException& e) { std::cout << e.what() - << ": Inverse Depth Landmark [" << DefaultKeyFormatter(this->key2()) << "]" - << " moved behind camera [" << DefaultKeyFormatter(this->key1()) <<"]" + << ": Inverse Depth Landmark [" << DefaultKeyFormatter(this->key<2>()) << "]" + << " moved behind camera [" << DefaultKeyFormatter(this->key<1>()) <<"]" << std::endl; return Vector::Ones(2) * 2.0 * K_->fx(); } diff --git a/gtsam_unstable/slam/InvDepthFactorVariant3.h b/gtsam_unstable/slam/InvDepthFactorVariant3.h index 98f2db2f37..cade280f0d 100644 --- a/gtsam_unstable/slam/InvDepthFactorVariant3.h +++ b/gtsam_unstable/slam/InvDepthFactorVariant3.h @@ -24,7 +24,7 @@ namespace gtsam { /** * Binary factor representing the first visual measurement using an inverse-depth parameterization */ -class InvDepthFactorVariant3a: public NoiseModelFactor2 { +class 
InvDepthFactorVariant3a: public NoiseModelFactorN { protected: // Keep a copy of measurement and calibration for I/O @@ -34,7 +34,7 @@ class InvDepthFactorVariant3a: public NoiseModelFactor2 { public: /// shorthand for base class type - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; /// shorthand for this class typedef InvDepthFactorVariant3a This; @@ -96,8 +96,8 @@ class InvDepthFactorVariant3a: public NoiseModelFactor2 { return camera.project(world_P_landmark) - measured_; } catch( CheiralityException& e) { std::cout << e.what() - << ": Inverse Depth Landmark [" << DefaultKeyFormatter(this->key1()) << "," << DefaultKeyFormatter(this->key2()) << "]" - << " moved behind camera [" << DefaultKeyFormatter(this->key1()) << "]" + << ": Inverse Depth Landmark [" << DefaultKeyFormatter(this->key<1>()) << "," << DefaultKeyFormatter(this->key<2>()) << "]" + << " moved behind camera [" << DefaultKeyFormatter(this->key<1>()) << "]" << std::endl; return Vector::Ones(2) * 2.0 * K_->fx(); } @@ -150,7 +150,7 @@ class InvDepthFactorVariant3a: public NoiseModelFactor2 { /** * Ternary factor representing a visual measurement using an inverse-depth parameterization */ -class InvDepthFactorVariant3b: public NoiseModelFactor3 { +class InvDepthFactorVariant3b: public NoiseModelFactorN { protected: // Keep a copy of measurement and calibration for I/O @@ -160,7 +160,7 @@ class InvDepthFactorVariant3b: public NoiseModelFactor3 { public: /// shorthand for base class type - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; /// shorthand for this class typedef InvDepthFactorVariant3b This; @@ -222,8 +222,8 @@ class InvDepthFactorVariant3b: public NoiseModelFactor3 { return camera.project(world_P_landmark) - measured_; } catch( CheiralityException& e) { std::cout << e.what() - << ": Inverse Depth Landmark [" << DefaultKeyFormatter(this->key1()) << "," << DefaultKeyFormatter(this->key3()) << "]" - << " moved behind camera " << 
DefaultKeyFormatter(this->key2()) + << ": Inverse Depth Landmark [" << DefaultKeyFormatter(this->key<1>()) << "," << DefaultKeyFormatter(this->key<3>()) << "]" + << " moved behind camera " << DefaultKeyFormatter(this->key<2>()) << std::endl; return Vector::Ones(2) * 2.0 * K_->fx(); } diff --git a/gtsam_unstable/slam/LocalOrientedPlane3Factor.cpp b/gtsam_unstable/slam/LocalOrientedPlane3Factor.cpp index 25d7083f80..3a8cd0c6cf 100644 --- a/gtsam_unstable/slam/LocalOrientedPlane3Factor.cpp +++ b/gtsam_unstable/slam/LocalOrientedPlane3Factor.cpp @@ -15,8 +15,8 @@ namespace gtsam { void LocalOrientedPlane3Factor::print(const string& s, const KeyFormatter& keyFormatter) const { cout << s << (s == "" ? "" : "\n"); - cout << "LocalOrientedPlane3Factor Factor (" << keyFormatter(key1()) << ", " - << keyFormatter(key2()) << ", " << keyFormatter(key3()) << ")\n"; + cout << "LocalOrientedPlane3Factor Factor (" << keyFormatter(key<1>()) << ", " + << keyFormatter(key<2>()) << ", " << keyFormatter(key<3>()) << ")\n"; measured_p_.print("Measured Plane"); this->noiseModel_->print(" noise model: "); } diff --git a/gtsam_unstable/slam/LocalOrientedPlane3Factor.h b/gtsam_unstable/slam/LocalOrientedPlane3Factor.h index f81c18bfa4..f2c5dd3a97 100644 --- a/gtsam_unstable/slam/LocalOrientedPlane3Factor.h +++ b/gtsam_unstable/slam/LocalOrientedPlane3Factor.h @@ -35,10 +35,10 @@ namespace gtsam { * optimized in x1 frame in the optimization. 
*/ class GTSAM_UNSTABLE_EXPORT LocalOrientedPlane3Factor - : public NoiseModelFactor3 { + : public NoiseModelFactorN { protected: OrientedPlane3 measured_p_; - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; public: /// Constructor LocalOrientedPlane3Factor() {} diff --git a/gtsam_unstable/slam/MultiProjectionFactor.h b/gtsam_unstable/slam/MultiProjectionFactor.h index 91d79f2089..34ab51d15b 100644 --- a/gtsam_unstable/slam/MultiProjectionFactor.h +++ b/gtsam_unstable/slam/MultiProjectionFactor.h @@ -187,8 +187,8 @@ namespace gtsam { if (H1) *H1 = Matrix::Zero(2,6); if (H2) *H2 = Matrix::Zero(2,3); if (verboseCheirality_) - std::cout << e.what() << ": Landmark "<< DefaultKeyFormatter(this->key2()) << - " moved behind camera " << DefaultKeyFormatter(this->key1()) << std::endl; + std::cout << e.what() << ": Landmark "<< DefaultKeyFormatter(this->keys_.at(1)) << + " moved behind camera " << DefaultKeyFormatter(this->keys_.at(0)) << std::endl; if (throwCheirality_) throw e; } diff --git a/gtsam_unstable/slam/PartialPriorFactor.h b/gtsam_unstable/slam/PartialPriorFactor.h index 52a57b945c..b49b491314 100644 --- a/gtsam_unstable/slam/PartialPriorFactor.h +++ b/gtsam_unstable/slam/PartialPriorFactor.h @@ -35,7 +35,7 @@ namespace gtsam { * @tparam VALUE is the type of variable the prior effects */ template - class PartialPriorFactor: public NoiseModelFactor1 { + class PartialPriorFactor: public NoiseModelFactorN { public: typedef VALUE T; @@ -44,7 +44,7 @@ namespace gtsam { // Concept checks on the variable type - currently requires Lie GTSAM_CONCEPT_LIE_TYPE(VALUE) - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; typedef PartialPriorFactor This; Vector prior_; ///< Measurement on tangent space parameters, in compressed form. 
@@ -141,6 +141,7 @@ namespace gtsam { friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor1 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor1", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(prior_); diff --git a/gtsam_unstable/slam/PoseBetweenFactor.h b/gtsam_unstable/slam/PoseBetweenFactor.h index b47dcaf33b..aaf00b45d1 100644 --- a/gtsam_unstable/slam/PoseBetweenFactor.h +++ b/gtsam_unstable/slam/PoseBetweenFactor.h @@ -29,12 +29,12 @@ namespace gtsam { * @ingroup slam */ template - class PoseBetweenFactor: public NoiseModelFactor2 { + class PoseBetweenFactor: public NoiseModelFactorN { private: typedef PoseBetweenFactor This; - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; POSE measured_; /** The measurement */ boost::optional body_P_sensor_; ///< The pose of the sensor in the body frame diff --git a/gtsam_unstable/slam/PosePriorFactor.h b/gtsam_unstable/slam/PosePriorFactor.h index b0cb25cfaf..c7a094bda3 100644 --- a/gtsam_unstable/slam/PosePriorFactor.h +++ b/gtsam_unstable/slam/PosePriorFactor.h @@ -26,12 +26,12 @@ namespace gtsam { * @ingroup slam */ template - class PosePriorFactor: public NoiseModelFactor1 { + class PosePriorFactor: public NoiseModelFactorN { private: typedef PosePriorFactor This; - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; POSE prior_; /** The measurement */ boost::optional body_P_sensor_; ///< The pose of the sensor in the body frame diff --git a/gtsam_unstable/slam/PoseToPointFactor.h b/gtsam_unstable/slam/PoseToPointFactor.h index 8d183015e0..0d295113d6 100644 --- a/gtsam_unstable/slam/PoseToPointFactor.h +++ b/gtsam_unstable/slam/PoseToPointFactor.h @@ -21,10 +21,10 @@ namespace gtsam { * @ingroup slam */ template -class PoseToPointFactor : public NoiseModelFactor2 { +class PoseToPointFactor : public NoiseModelFactorN { 
private: typedef PoseToPointFactor This; - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; POINT measured_; /** the point measurement in local coordinates */ @@ -47,7 +47,8 @@ class PoseToPointFactor : public NoiseModelFactor2 { /** print */ void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { - std::cout << s << "PoseToPointFactor(" << keyFormatter(this->key1()) << "," + std::cout << s << "PoseToPointFactor(" + << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ")\n" << " measured: " << measured_.transpose() << std::endl; this->noiseModel_->print(" noise model: "); @@ -91,8 +92,10 @@ class PoseToPointFactor : public NoiseModelFactor2 { friend class boost::serialization::access; template void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + // NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar& boost::serialization::make_nvp( - "NoiseModelFactor2", boost::serialization::base_object(*this)); + "NoiseModelFactor2", + boost::serialization::base_object(*this)); ar& BOOST_SERIALIZATION_NVP(measured_); } diff --git a/gtsam_unstable/slam/ProjectionFactorPPP.h b/gtsam_unstable/slam/ProjectionFactorPPP.h index d6f643c6cc..ab8cba4816 100644 --- a/gtsam_unstable/slam/ProjectionFactorPPP.h +++ b/gtsam_unstable/slam/ProjectionFactorPPP.h @@ -31,7 +31,7 @@ namespace gtsam { * @ingroup slam */ template - class ProjectionFactorPPP: public NoiseModelFactor3 { + class ProjectionFactorPPP: public NoiseModelFactorN { protected: // Keep a copy of measurement and calibration for I/O @@ -45,7 +45,7 @@ namespace gtsam { public: /// shorthand for base class type - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; /// shorthand for this class typedef ProjectionFactorPPP This; @@ -142,8 +142,11 @@ namespace gtsam { if (H2) *H2 = Matrix::Zero(2,6); if (H3) *H3 = Matrix::Zero(2,3); if (verboseCheirality_) - std::cout << e.what() << ": Landmark "<< 
DefaultKeyFormatter(this->key2()) << - " moved behind camera " << DefaultKeyFormatter(this->key1()) << std::endl; + std::cout << e.what() << ": Landmark " + << DefaultKeyFormatter(this->key2()) + << " moved behind camera " + << DefaultKeyFormatter(this->key1()) + << std::endl; if (throwCheirality_) throw e; } diff --git a/gtsam_unstable/slam/ProjectionFactorPPPC.h b/gtsam_unstable/slam/ProjectionFactorPPPC.h index 5c9a8c6914..dec893af48 100644 --- a/gtsam_unstable/slam/ProjectionFactorPPPC.h +++ b/gtsam_unstable/slam/ProjectionFactorPPPC.h @@ -34,7 +34,7 @@ namespace gtsam { */ template class GTSAM_UNSTABLE_EXPORT ProjectionFactorPPPC - : public NoiseModelFactor4 { + : public NoiseModelFactorN { protected: Point2 measured_; ///< 2D measurement @@ -44,7 +44,7 @@ class GTSAM_UNSTABLE_EXPORT ProjectionFactorPPPC public: /// shorthand for base class type - typedef NoiseModelFactor4 Base; + typedef NoiseModelFactorN Base; /// shorthand for this class typedef ProjectionFactorPPPC This; diff --git a/gtsam_unstable/slam/ProjectionFactorRollingShutter.cpp b/gtsam_unstable/slam/ProjectionFactorRollingShutter.cpp index c92a13daf5..8173c9dbde 100644 --- a/gtsam_unstable/slam/ProjectionFactorRollingShutter.cpp +++ b/gtsam_unstable/slam/ProjectionFactorRollingShutter.cpp @@ -54,9 +54,9 @@ Vector ProjectionFactorRollingShutter::evaluateError( if (H3) *H3 = Matrix::Zero(2, 3); if (verboseCheirality_) std::cout << e.what() << ": Landmark " - << DefaultKeyFormatter(this->key2()) << " moved behind camera " - << DefaultKeyFormatter(this->key1()) << std::endl; - if (throwCheirality_) throw CheiralityException(this->key2()); + << DefaultKeyFormatter(this->key<2>()) << " moved behind camera " + << DefaultKeyFormatter(this->key<1>()) << std::endl; + if (throwCheirality_) throw CheiralityException(this->key<2>()); } return Vector2::Constant(2.0 * K_->fx()); } diff --git a/gtsam_unstable/slam/ProjectionFactorRollingShutter.h b/gtsam_unstable/slam/ProjectionFactorRollingShutter.h index 
597d894da8..806f54fa55 100644 --- a/gtsam_unstable/slam/ProjectionFactorRollingShutter.h +++ b/gtsam_unstable/slam/ProjectionFactorRollingShutter.h @@ -42,7 +42,7 @@ namespace gtsam { */ class GTSAM_UNSTABLE_EXPORT ProjectionFactorRollingShutter - : public NoiseModelFactor3 { + : public NoiseModelFactorN { protected: // Keep a copy of measurement and calibration for I/O Point2 measured_; ///< 2D measurement @@ -60,7 +60,7 @@ class GTSAM_UNSTABLE_EXPORT ProjectionFactorRollingShutter public: /// shorthand for base class type - typedef NoiseModelFactor3 Base; + typedef NoiseModelFactorN Base; /// shorthand for this class typedef ProjectionFactorRollingShutter This; diff --git a/gtsam_unstable/slam/RelativeElevationFactor.h b/gtsam_unstable/slam/RelativeElevationFactor.h index ebf91d1f7b..c6273ff4bb 100644 --- a/gtsam_unstable/slam/RelativeElevationFactor.h +++ b/gtsam_unstable/slam/RelativeElevationFactor.h @@ -25,13 +25,13 @@ namespace gtsam { * * TODO: enable use of a Pose3 for the target as well */ -class GTSAM_UNSTABLE_EXPORT RelativeElevationFactor: public NoiseModelFactor2 { +class GTSAM_UNSTABLE_EXPORT RelativeElevationFactor: public NoiseModelFactorN { private: double measured_; /** measurement */ typedef RelativeElevationFactor This; - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; public: @@ -66,6 +66,7 @@ class GTSAM_UNSTABLE_EXPORT RelativeElevationFactor: public NoiseModelFactor2 void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + // NoiseModelFactor2 instead of NoiseModelFactorN for backward compatibility ar & boost::serialization::make_nvp("NoiseModelFactor2", boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(measured_); diff --git a/gtsam_unstable/slam/TSAMFactors.h b/gtsam_unstable/slam/TSAMFactors.h index 6f98a2dbdc..2e024c5bbe 100644 --- a/gtsam_unstable/slam/TSAMFactors.h +++ b/gtsam_unstable/slam/TSAMFactors.h @@ -26,11 +26,11 @@ namespace gtsam { /** * DeltaFactor: relative 2D measurement 
between Pose2 and Point2 */ -class DeltaFactor: public NoiseModelFactor2 { +class DeltaFactor: public NoiseModelFactorN { public: typedef DeltaFactor This; - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; typedef boost::shared_ptr shared_ptr; private: @@ -55,11 +55,11 @@ class DeltaFactor: public NoiseModelFactor2 { /** * DeltaFactorBase: relative 2D measurement between Pose2 and Point2, with Basenodes */ -class DeltaFactorBase: public NoiseModelFactor4 { +class DeltaFactorBase: public NoiseModelFactorN { public: typedef DeltaFactorBase This; - typedef NoiseModelFactor4 Base; + typedef NoiseModelFactorN Base; typedef boost::shared_ptr shared_ptr; private: @@ -110,11 +110,11 @@ class DeltaFactorBase: public NoiseModelFactor4 { /** * OdometryFactorBase: Pose2 odometry, with Basenodes */ -class OdometryFactorBase: public NoiseModelFactor4 { +class OdometryFactorBase: public NoiseModelFactorN { public: typedef OdometryFactorBase This; - typedef NoiseModelFactor4 Base; + typedef NoiseModelFactorN Base; typedef boost::shared_ptr shared_ptr; private: diff --git a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h index 3afa83f45b..0318c3eb12 100644 --- a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h +++ b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h @@ -32,7 +32,7 @@ namespace gtsam { * @ingroup slam */ template - class TransformBtwRobotsUnaryFactor: public NonlinearFactor { // TODO why not NoiseModelFactor1 ? + class TransformBtwRobotsUnaryFactor: public NonlinearFactor { // TODO why not NoiseModelFactorN ? 
public: @@ -70,7 +70,7 @@ namespace gtsam { TransformBtwRobotsUnaryFactor(Key key, const VALUE& measured, Key keyA, Key keyB, const gtsam::Values& valA, const gtsam::Values& valB, const SharedGaussian& model) : - Base(cref_list_of<1>(key)), key_(key), measured_(measured), keyA_(keyA), keyB_(keyB), + Base(KeyVector{key}), key_(key), measured_(measured), keyA_(keyA), keyB_(keyB), model_(model){ setValAValB(valA, valB); diff --git a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h index 2748d337e6..58553b81fb 100644 --- a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h +++ b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h @@ -84,7 +84,7 @@ namespace gtsam { const double prior_inlier, const double prior_outlier, const bool flag_bump_up_near_zero_probs = false, const bool start_with_M_step = false) : - Base(cref_list_of<1>(key)), key_(key), measured_(measured), keyA_(keyA), keyB_(keyB), + Base(KeyVector{key}), key_(key), measured_(measured), keyA_(keyA), keyB_(keyB), model_inlier_(model_inlier), model_outlier_(model_outlier), prior_inlier_(prior_inlier), prior_outlier_(prior_outlier), flag_bump_up_near_zero_probs_(flag_bump_up_near_zero_probs), start_with_M_step_(false){ diff --git a/gtsam_unstable/slam/tests/testSerialization.cpp b/gtsam_unstable/slam/tests/testSerialization.cpp index 362cf3778f..5244710e85 100644 --- a/gtsam_unstable/slam/tests/testSerialization.cpp +++ b/gtsam_unstable/slam/tests/testSerialization.cpp @@ -17,7 +17,6 @@ #include -#include #include #include @@ -27,7 +26,6 @@ using namespace std; using namespace gtsam; -using namespace boost::assign; namespace fs = boost::filesystem; #ifdef TOPSRCDIR static string topdir = TOPSRCDIR; diff --git a/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp b/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp index b88a27c6ca..cad1feae10 100644 --- 
a/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp +++ b/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp @@ -25,14 +25,12 @@ #include #include -#include #include #include "gtsam/slam/tests/smartFactorScenarios.h" #define DISABLE_TIMING using namespace gtsam; -using namespace boost::assign; using namespace std::placeholders; static const double rankTol = 1.0; diff --git a/gtsam_unstable/slam/tests/testSmartStereoProjectionFactorPP.cpp b/gtsam_unstable/slam/tests/testSmartStereoProjectionFactorPP.cpp index 1dbd256664..095bedfab2 100644 --- a/gtsam_unstable/slam/tests/testSmartStereoProjectionFactorPP.cpp +++ b/gtsam_unstable/slam/tests/testSmartStereoProjectionFactorPP.cpp @@ -23,12 +23,10 @@ #include #include #include -#include #include #include using namespace std; -using namespace boost::assign; using namespace gtsam; namespace { @@ -289,9 +287,7 @@ TEST( SmartProjectionPoseFactor, noiseless_error_multipleExtrinsics_missingMeasu EXPECT_DOUBLES_EQUAL(expectedError, actualError2, 1e-7); // The following are generically exercising the triangulation - CameraSet cams; - cams += w_Camera_cam1; - cams += w_Camera_cam2; + CameraSet cams{w_Camera_cam1, w_Camera_cam2}; TriangulationResult result = factor1.triangulateSafe(cams); CHECK(result); EXPECT(assert_equal(landmark, *result, 1e-7)); diff --git a/gtsam_unstable/slam/tests/testSmartStereoProjectionPoseFactor.cpp b/gtsam_unstable/slam/tests/testSmartStereoProjectionPoseFactor.cpp index fc56b1a9f4..9f61d4fb0f 100644 --- a/gtsam_unstable/slam/tests/testSmartStereoProjectionPoseFactor.cpp +++ b/gtsam_unstable/slam/tests/testSmartStereoProjectionPoseFactor.cpp @@ -24,12 +24,10 @@ #include #include #include -#include #include #include using namespace std; -using namespace boost::assign; using namespace gtsam; namespace { @@ -220,9 +218,7 @@ TEST( SmartProjectionPoseFactor, noiselessWithMissingMeasurements ) { double actualError2 = factor1.totalReprojectionError(cameras); 
EXPECT_DOUBLES_EQUAL(expectedError, actualError2, 1e-7); - CameraSet cams; - cams += level_camera; - cams += level_camera_right; + CameraSet cams{level_camera, level_camera_right}; TriangulationResult result = factor1.triangulateSafe(cams); CHECK(result); EXPECT(assert_equal(landmark, *result, 1e-7)); diff --git a/gtsam_unstable/timing/timeDSFvariants.cpp b/gtsam_unstable/timing/timeDSFvariants.cpp index a7b9136f86..35ae7d4d5e 100644 --- a/gtsam_unstable/timing/timeDSFvariants.cpp +++ b/gtsam_unstable/timing/timeDSFvariants.cpp @@ -22,7 +22,6 @@ #include #include -#include #include #include @@ -32,7 +31,6 @@ using namespace std; using namespace gtsam; -using namespace boost::assign; using boost::format; int main(int argc, char* argv[]) { @@ -42,8 +40,7 @@ int main(int argc, char* argv[]) { os << "images,points,matches,Base,Map,BTree" << endl; // loop over number of images - vector ms; - ms += 10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000; + vector ms {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000}; for(size_t m: ms) { // We use volatile here to make these appear to the optimizing compiler as // if their values are only known at run-time. 
diff --git a/package.xml b/package.xml index 341c78ba3e..9e402d2c4b 100644 --- a/package.xml +++ b/package.xml @@ -1,7 +1,8 @@ - + + gtsam - 4.1.0 + 4.2.0 gtsam Frank Dellaert diff --git a/python/gtsam/examples/CustomFactorExample.py b/python/gtsam/examples/CustomFactorExample.py index 36c1e003d3..c7ae8ad21d 100644 --- a/python/gtsam/examples/CustomFactorExample.py +++ b/python/gtsam/examples/CustomFactorExample.py @@ -60,10 +60,10 @@ def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor, key1 = this.keys()[0] key2 = this.keys()[1] pos1, pos2 = values.atVector(key1), values.atVector(key2) - error = measurement - (pos1 - pos2) + error = (pos2 - pos1) - measurement if jacobians is not None: - jacobians[0] = I - jacobians[1] = -I + jacobians[0] = -I + jacobians[1] = I return error diff --git a/python/gtsam/preamble/hybrid.h b/python/gtsam/preamble/hybrid.h index bae636d6a5..90a062d666 100644 --- a/python/gtsam/preamble/hybrid.h +++ b/python/gtsam/preamble/hybrid.h @@ -12,10 +12,9 @@ */ #include +// NOTE: Needed since we are including pybind11/stl.h. #ifdef GTSAM_ALLOCATOR_TBB PYBIND11_MAKE_OPAQUE(std::vector>); #else PYBIND11_MAKE_OPAQUE(std::vector); #endif - -PYBIND11_MAKE_OPAQUE(std::vector); diff --git a/python/gtsam/preamble/inference.h b/python/gtsam/preamble/inference.h index d07a75f6fb..4106c794ac 100644 --- a/python/gtsam/preamble/inference.h +++ b/python/gtsam/preamble/inference.h @@ -10,3 +10,5 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. 
*/ + +#include \ No newline at end of file diff --git a/python/gtsam/specializations/hybrid.h b/python/gtsam/specializations/hybrid.h index bede6d86c4..e69de29bb2 100644 --- a/python/gtsam/specializations/hybrid.h +++ b/python/gtsam/specializations/hybrid.h @@ -1,4 +0,0 @@ - -py::bind_vector >(m_, "GaussianFactorVector"); - -py::implicitly_convertible >(); diff --git a/python/gtsam/tests/test_DiscreteBayesNet.py b/python/gtsam/tests/test_DiscreteBayesNet.py index ff2ba99d15..d597effa88 100644 --- a/python/gtsam/tests/test_DiscreteBayesNet.py +++ b/python/gtsam/tests/test_DiscreteBayesNet.py @@ -11,13 +11,15 @@ # pylint: disable=no-name-in-module, invalid-name +import math import textwrap import unittest +from gtsam.utils.test_case import GtsamTestCase + import gtsam from gtsam import (DiscreteBayesNet, DiscreteConditional, DiscreteDistribution, DiscreteFactorGraph, DiscreteKeys, DiscreteValues, Ordering) -from gtsam.utils.test_case import GtsamTestCase # Some keys: Asia = (0, 2) @@ -111,7 +113,7 @@ def test_Asia(self): self.assertEqual(len(actualSample), 8) def test_fragment(self): - """Test sampling and optimizing for Asia fragment.""" + """Test evaluate/sampling/optimizing for Asia fragment.""" # Create a reverse-topologically sorted fragment: fragment = DiscreteBayesNet() @@ -125,8 +127,14 @@ def test_fragment(self): given[key[0]] = 0 # Now sample from fragment: - actual = fragment.sample(given) - self.assertEqual(len(actual), 5) + values = fragment.sample(given) + self.assertEqual(len(values), 5) + + for i in [0, 1, 2]: + self.assertAlmostEqual(fragment.at(i).logProbability(values), + math.log(fragment.at(i).evaluate(values))) + self.assertAlmostEqual(fragment.logProbability(values), + math.log(fragment.evaluate(values))) def test_dot(self): """Check that dot works with position hints.""" @@ -139,7 +147,7 @@ def test_dot(self): # Make sure we can *update* position hints writer = gtsam.DotWriter() ph: dict = writer.positionHints - ph['a'] = 2 # hint at symbol 
position + ph['a'] = 2 # hint at symbol position writer.positionHints = ph # Check the output of dot diff --git a/python/gtsam/tests/test_GaussianBayesNet.py b/python/gtsam/tests/test_GaussianBayesNet.py index e4d396cfe8..05522441b4 100644 --- a/python/gtsam/tests/test_GaussianBayesNet.py +++ b/python/gtsam/tests/test_GaussianBayesNet.py @@ -10,15 +10,14 @@ """ # pylint: disable=invalid-name, no-name-in-module, no-member -from __future__ import print_function - import unittest -import gtsam import numpy as np -from gtsam import GaussianBayesNet, GaussianConditional from gtsam.utils.test_case import GtsamTestCase +import gtsam +from gtsam import GaussianBayesNet, GaussianConditional + # some keys _x_ = 11 _y_ = 22 @@ -29,8 +28,7 @@ def smallBayesNet(): """Create a small Bayes Net for testing""" bayesNet = GaussianBayesNet() I_1x1 = np.eye(1, dtype=float) - bayesNet.push_back(GaussianConditional( - _x_, [9.0], I_1x1, _y_, I_1x1)) + bayesNet.push_back(GaussianConditional(_x_, [9.0], I_1x1, _y_, I_1x1)) bayesNet.push_back(GaussianConditional(_y_, [5.0], I_1x1)) return bayesNet @@ -41,13 +39,33 @@ class TestGaussianBayesNet(GtsamTestCase): def test_matrix(self): """Test matrix method""" R, d = smallBayesNet().matrix() # get matrix and RHS - R1 = np.array([ - [1.0, 1.0], - [0.0, 1.0]]) + R1 = np.array([[1.0, 1.0], [0.0, 1.0]]) d1 = np.array([9.0, 5.0]) np.testing.assert_equal(R, R1) np.testing.assert_equal(d, d1) + def test_evaluate(self): + """Test evaluate method""" + bayesNet = smallBayesNet() + values = gtsam.VectorValues() + values.insert(_x_, np.array([9.0])) + values.insert(_y_, np.array([5.0])) + for i in [0, 1]: + self.assertAlmostEqual(bayesNet.at(i).logProbability(values), + np.log(bayesNet.at(i).evaluate(values))) + self.assertAlmostEqual(bayesNet.logProbability(values), + np.log(bayesNet.evaluate(values))) + + def test_sample(self): + """Test sample method""" + bayesNet = smallBayesNet() + sample = bayesNet.sample() + self.assertIsInstance(sample, 
gtsam.VectorValues) + + # standard deviation is 1.0 for both, so we set tolerance to 3*sigma + mean = bayesNet.optimize() + self.gtsamAssertEquals(sample, mean, tol=3.0) + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/gtsam/tests/test_HybridBayesNet.py b/python/gtsam/tests/test_HybridBayesNet.py new file mode 100644 index 0000000000..01e1c5a5d8 --- /dev/null +++ b/python/gtsam/tests/test_HybridBayesNet.py @@ -0,0 +1,97 @@ +""" +GTSAM Copyright 2010-2022, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for Hybrid Values. +Author: Frank Dellaert +""" +# pylint: disable=invalid-name, no-name-in-module, no-member + +import math +import unittest + +import numpy as np +from gtsam.symbol_shorthand import A, X +from gtsam.utils.test_case import GtsamTestCase + +from gtsam import (DiscreteConditional, DiscreteKeys, DiscreteValues, GaussianConditional, + GaussianMixture, HybridBayesNet, HybridValues, noiseModel, VectorValues) + + +class TestHybridBayesNet(GtsamTestCase): + """Unit tests for HybridValues.""" + + def test_evaluate(self): + """Test evaluate for a hybrid Bayes net P(X0|X1) P(X1|Asia) P(Asia).""" + asiaKey = A(0) + Asia = (asiaKey, 2) + + # Create the continuous conditional + I_1x1 = np.eye(1) + conditional = GaussianConditional.FromMeanAndStddev(X(0), 2 * I_1x1, X(1), [-4], + 5.0) + + # Create the noise models + model0 = noiseModel.Diagonal.Sigmas([2.0]) + model1 = noiseModel.Diagonal.Sigmas([3.0]) + + # Create the conditionals + conditional0 = GaussianConditional(X(1), [5], I_1x1, model0) + conditional1 = GaussianConditional(X(1), [2], I_1x1, model1) + discrete_keys = DiscreteKeys() + discrete_keys.push_back(Asia) + + # Create hybrid Bayes net. 
+ bayesNet = HybridBayesNet() + bayesNet.push_back(conditional) + bayesNet.push_back(GaussianMixture( + [X(1)], [], discrete_keys, [conditional0, conditional1])) + bayesNet.push_back(DiscreteConditional(Asia, "99/1")) + + # Create values at which to evaluate. + values = HybridValues() + continuous = VectorValues() + continuous.insert(X(0), [-6]) + continuous.insert(X(1), [1]) + values.insert(continuous) + discrete = DiscreteValues() + discrete[asiaKey] = 0 + values.insert(discrete) + + conditionalProbability = conditional.evaluate(values.continuous()) + mixtureProbability = conditional0.evaluate(values.continuous()) + self.assertAlmostEqual(conditionalProbability * mixtureProbability * + 0.99, + bayesNet.evaluate(values), + places=5) + + # Check logProbability + self.assertAlmostEqual(bayesNet.logProbability(values), + math.log(bayesNet.evaluate(values))) + + # Check invariance for all conditionals: + self.check_invariance(bayesNet.at(0).asGaussian(), continuous) + self.check_invariance(bayesNet.at(0).asGaussian(), values) + self.check_invariance(bayesNet.at(0), values) + + self.check_invariance(bayesNet.at(1), values) + + self.check_invariance(bayesNet.at(2).asDiscrete(), discrete) + self.check_invariance(bayesNet.at(2).asDiscrete(), values) + self.check_invariance(bayesNet.at(2), values) + + def check_invariance(self, conditional, values): + """Check invariance for given conditional.""" + probability = conditional.evaluate(values) + self.assertTrue(probability >= 0.0) + logProb = conditional.logProbability(values) + self.assertAlmostEqual(probability, np.exp(logProb)) + expected = conditional.logNormalizationConstant() - conditional.error(values) + self.assertAlmostEqual(logProb, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam/tests/test_HybridFactorGraph.py b/python/gtsam/tests/test_HybridFactorGraph.py index 576efa82fd..499afe09f4 100644 --- a/python/gtsam/tests/test_HybridFactorGraph.py +++ 
b/python/gtsam/tests/test_HybridFactorGraph.py @@ -6,83 +6,328 @@ See LICENSE for the license information Unit tests for Hybrid Factor Graphs. -Author: Fan Jiang +Author: Fan Jiang, Varun Agrawal, Frank Dellaert """ # pylint: disable=invalid-name, no-name-in-module, no-member -from __future__ import print_function - import unittest -import gtsam import numpy as np -from gtsam.symbol_shorthand import C, X +from gtsam.symbol_shorthand import C, M, X, Z from gtsam.utils.test_case import GtsamTestCase +import gtsam +from gtsam import (DiscreteConditional, DiscreteKeys, GaussianConditional, + GaussianMixture, GaussianMixtureFactor, HybridBayesNet, + HybridGaussianFactorGraph, HybridValues, JacobianFactor, + Ordering, noiseModel) + +DEBUG_MARGINALS = False + class TestHybridGaussianFactorGraph(GtsamTestCase): """Unit tests for HybridGaussianFactorGraph.""" - def test_create(self): - """Test contruction of hybrid factor graph.""" - noiseModel = gtsam.noiseModel.Unit.Create(3) - dk = gtsam.DiscreteKeys() + """Test construction of hybrid factor graph.""" + model = noiseModel.Unit.Create(3) + dk = DiscreteKeys() dk.push_back((C(0), 2)) - jf1 = gtsam.JacobianFactor(X(0), np.eye(3), np.zeros((3, 1)), - noiseModel) - jf2 = gtsam.JacobianFactor(X(0), np.eye(3), np.ones((3, 1)), - noiseModel) + jf1 = JacobianFactor(X(0), np.eye(3), np.zeros((3, 1)), model) + jf2 = JacobianFactor(X(0), np.eye(3), np.ones((3, 1)), model) - gmf = gtsam.GaussianMixtureFactor.FromFactors([X(0)], dk, [jf1, jf2]) + gmf = GaussianMixtureFactor([X(0)], dk, [jf1, jf2]) - hfg = gtsam.HybridGaussianFactorGraph() - hfg.add(jf1) - hfg.add(jf2) + hfg = HybridGaussianFactorGraph() + hfg.push_back(jf1) + hfg.push_back(jf2) hfg.push_back(gmf) - hbn = hfg.eliminateSequential( - gtsam.Ordering.ColamdConstrainedLastHybridGaussianFactorGraph( - hfg, [C(0)])) + hbn = hfg.eliminateSequential() - # print("hbn = ", hbn) self.assertEqual(hbn.size(), 2) mixture = hbn.at(0).inner() - self.assertIsInstance(mixture, 
gtsam.GaussianMixture) + self.assertIsInstance(mixture, GaussianMixture) self.assertEqual(len(mixture.keys()), 2) discrete_conditional = hbn.at(hbn.size() - 1).inner() - self.assertIsInstance(discrete_conditional, gtsam.DiscreteConditional) + self.assertIsInstance(discrete_conditional, DiscreteConditional) def test_optimize(self): - """Test contruction of hybrid factor graph.""" - noiseModel = gtsam.noiseModel.Unit.Create(3) - dk = gtsam.DiscreteKeys() + """Test construction of hybrid factor graph.""" + model = noiseModel.Unit.Create(3) + dk = DiscreteKeys() dk.push_back((C(0), 2)) - jf1 = gtsam.JacobianFactor(X(0), np.eye(3), np.zeros((3, 1)), - noiseModel) - jf2 = gtsam.JacobianFactor(X(0), np.eye(3), np.ones((3, 1)), - noiseModel) + jf1 = JacobianFactor(X(0), np.eye(3), np.zeros((3, 1)), model) + jf2 = JacobianFactor(X(0), np.eye(3), np.ones((3, 1)), model) - gmf = gtsam.GaussianMixtureFactor.FromFactors([X(0)], dk, [jf1, jf2]) + gmf = GaussianMixtureFactor([X(0)], dk, [jf1, jf2]) - hfg = gtsam.HybridGaussianFactorGraph() - hfg.add(jf1) - hfg.add(jf2) + hfg = HybridGaussianFactorGraph() + hfg.push_back(jf1) + hfg.push_back(jf2) hfg.push_back(gmf) - dtf = gtsam.DecisionTreeFactor([(C(0), 2)],"0 1") - hfg.add(dtf) + dtf = gtsam.DecisionTreeFactor([(C(0), 2)], "0 1") + hfg.push_back(dtf) - hbn = hfg.eliminateSequential( - gtsam.Ordering.ColamdConstrainedLastHybridGaussianFactorGraph( - hfg, [C(0)])) + hbn = hfg.eliminateSequential() - # print("hbn = ", hbn) hv = hbn.optimize() self.assertEqual(hv.atDiscrete(C(0)), 1) + @staticmethod + def tiny(num_measurements: int = 1, + prior_mean: float = 5.0, + prior_sigma: float = 0.5) -> HybridBayesNet: + """ + Create a tiny two variable hybrid model which represents + the generative probability P(Z, x0, mode) = P(Z|x0, mode)P(x0)P(mode). + num_measurements: number of measurements in Z = {z0, z1...} + """ + # Create hybrid Bayes net. + bayesNet = HybridBayesNet() + + # Create mode key: 0 is low-noise, 1 is high-noise. 
+ mode = (M(0), 2) + + # Create Gaussian mixture Z(0) = X(0) + noise for each measurement. + I_1x1 = np.eye(1) + keys = DiscreteKeys() + keys.push_back(mode) + for i in range(num_measurements): + conditional0 = GaussianConditional.FromMeanAndStddev(Z(i), + I_1x1, + X(0), [0], + sigma=0.5) + conditional1 = GaussianConditional.FromMeanAndStddev(Z(i), + I_1x1, + X(0), [0], + sigma=3) + bayesNet.push_back(GaussianMixture([Z(i)], [X(0)], keys, + [conditional0, conditional1])) + + # Create prior on X(0). + prior_on_x0 = GaussianConditional.FromMeanAndStddev( + X(0), [prior_mean], prior_sigma) + bayesNet.push_back(prior_on_x0) + + # Add prior on mode. + bayesNet.push_back(DiscreteConditional(mode, "4/6")) + + return bayesNet + + def test_evaluate(self): + """Test evaluate with two different prior noise models.""" + # TODO(dellaert): really a HBN test + # Create a tiny Bayes net P(x0) P(m0) P(z0|x0) + bayesNet1 = self.tiny(prior_sigma=0.5, num_measurements=1) + bayesNet2 = self.tiny(prior_sigma=5.0, num_measurements=1) + # bn1: # 1/sqrt(2*pi*0.5^2) + # bn2: # 1/sqrt(2*pi*5.0^2) + expected_ratio = np.sqrt(2 * np.pi * 5.0**2) / np.sqrt( + 2 * np.pi * 0.5**2) + mean0 = HybridValues() + mean0.insert(X(0), [5.0]) + mean0.insert(Z(0), [5.0]) + mean0.insert(M(0), 0) + self.assertAlmostEqual(bayesNet1.evaluate(mean0) / + bayesNet2.evaluate(mean0), + expected_ratio, + delta=1e-9) + mean1 = HybridValues() + mean1.insert(X(0), [5.0]) + mean1.insert(Z(0), [5.0]) + mean1.insert(M(0), 1) + self.assertAlmostEqual(bayesNet1.evaluate(mean1) / + bayesNet2.evaluate(mean1), + expected_ratio, + delta=1e-9) + + @staticmethod + def measurements(sample: HybridValues, indices) -> gtsam.VectorValues: + """Create measurements from a sample, grabbing Z(i) for indices.""" + measurements = gtsam.VectorValues() + for i in indices: + measurements.insert(Z(i), sample.at(Z(i))) + return measurements + + @classmethod + def estimate_marginals(cls, + target, + proposal_density: HybridBayesNet, + N=10000): + 
"""Do importance sampling to estimate discrete marginal P(mode).""" + # Allocate space for marginals on mode. + marginals = np.zeros((2, )) + + # Do importance sampling. + for s in range(N): + proposed = proposal_density.sample() # sample from proposal + target_proposed = target(proposed) # evaluate target + weight = target_proposed / proposal_density.evaluate(proposed) + marginals[proposed.atDiscrete(M(0))] += weight + + # print marginals: + marginals /= marginals.sum() + return marginals + + def test_tiny(self): + """Test a tiny two variable hybrid model.""" + # Create P(x0)P(mode)P(z0|x0,mode) + prior_sigma = 0.5 + bayesNet = self.tiny(prior_sigma=prior_sigma) + + # Deterministic values exactly at the mean, for both x and Z: + values = HybridValues() + values.insert(X(0), [5.0]) + values.insert(M(0), 0) # low-noise, standard deviation 0.5 + measurements = gtsam.VectorValues() + measurements.insert(Z(0), [5.0]) + values.insert(measurements) + + def unnormalized_posterior(x): + """Posterior is proportional to joint, centered at 5.0 as well.""" + x.insert(measurements) + return bayesNet.evaluate(x) + + # Create proposal density on (x0, mode), making sure it has same mean: + posterior_information = 1 / (prior_sigma**2) + 1 / (0.5**2) + posterior_sigma = posterior_information**(-0.5) + proposal_density = self.tiny(num_measurements=0, + prior_mean=5.0, + prior_sigma=posterior_sigma) + + # Estimate marginals using importance sampling. + marginals = self.estimate_marginals(target=unnormalized_posterior, + proposal_density=proposal_density) + if DEBUG_MARGINALS: + print(f"True mode: {values.atDiscrete(M(0))}") + print(f"P(mode=0; Z) = {marginals[0]}") + print(f"P(mode=1; Z) = {marginals[1]}") + + # Check that the estimate is close to the true value. + self.assertAlmostEqual(marginals[0], 0.74, delta=0.01) + self.assertAlmostEqual(marginals[1], 0.26, delta=0.01) + + # Convert to factor graph with given measurements. 
+ fg = bayesNet.toFactorGraph(measurements) + self.assertEqual(fg.size(), 3) + + # Check ratio between unnormalized posterior and factor graph is the same for all modes: + for mode in [1, 0]: + values.insert_or_assign(M(0), mode) + self.assertAlmostEqual(bayesNet.evaluate(values) / + np.exp(-fg.error(values)), + 0.6366197723675815) + self.assertAlmostEqual(bayesNet.error(values), fg.error(values)) + + # Test elimination. + posterior = fg.eliminateSequential() + + def true_posterior(x): + """Posterior from elimination.""" + x.insert(measurements) + return posterior.evaluate(x) + + # Estimate marginals using importance sampling. + marginals = self.estimate_marginals(target=true_posterior, + proposal_density=proposal_density) + if DEBUG_MARGINALS: + print(f"True mode: {values.atDiscrete(M(0))}") + print(f"P(mode=0; z0) = {marginals[0]}") + print(f"P(mode=1; z0) = {marginals[1]}") + + # Check that the estimate is close to the true value. + self.assertAlmostEqual(marginals[0], 0.74, delta=0.01) + self.assertAlmostEqual(marginals[1], 0.26, delta=0.01) + + @staticmethod + def calculate_ratio(bayesNet: HybridBayesNet, + fg: HybridGaussianFactorGraph, sample: HybridValues): + """Calculate ratio between Bayes net and factor graph.""" + return bayesNet.evaluate(sample) / fg.probPrime(sample) if \ + fg.probPrime(sample) > 0 else 0 + + def test_ratio(self): + """ + Given a tiny two variable hybrid model, with 2 measurements, test the + ratio of the bayes net model representing P(z,x,n)=P(z|x, n)P(x)P(n) + and the factor graph P(x, n | z)=P(x | n, z)P(n|z), + both of which represent the same posterior. 
+ """ + # Create generative model P(z, x, n)=P(z|x, n)P(x)P(n) + prior_sigma = 0.5 + bayesNet = self.tiny(prior_sigma=prior_sigma, num_measurements=2) + + # Deterministic values exactly at the mean, for both x and Z: + values = HybridValues() + values.insert(X(0), [5.0]) + values.insert(M(0), 0) # high-noise, standard deviation 3 + measurements = gtsam.VectorValues() + measurements.insert(Z(0), [4.0]) + measurements.insert(Z(1), [6.0]) + values.insert(measurements) + + def unnormalized_posterior(x): + """Posterior is proportional to joint, centered at 5.0 as well.""" + x.insert(measurements) + return bayesNet.evaluate(x) + + # Create proposal density on (x0, mode), making sure it has same mean: + posterior_information = 1 / (prior_sigma**2) + 2.0 / (3.0**2) + posterior_sigma = posterior_information**(-0.5) + proposal_density = self.tiny(num_measurements=0, + prior_mean=5.0, + prior_sigma=posterior_sigma) + + # Estimate marginals using importance sampling. + marginals = self.estimate_marginals(target=unnormalized_posterior, + proposal_density=proposal_density) + if DEBUG_MARGINALS: + print(f"True mode: {values.atDiscrete(M(0))}") + print(f"P(mode=0; Z) = {marginals[0]}") + print(f"P(mode=1; Z) = {marginals[1]}") + + # Check that the estimate is close to the true value. + self.assertAlmostEqual(marginals[0], 0.23, delta=0.01) + self.assertAlmostEqual(marginals[1], 0.77, delta=0.01) + + # Convert to factor graph using measurements. + fg = bayesNet.toFactorGraph(measurements) + self.assertEqual(fg.size(), 4) + + # Calculate ratio between Bayes net probability and the factor graph: + expected_ratio = self.calculate_ratio(bayesNet, fg, values) + # print(f"expected_ratio: {expected_ratio}\n") + + # Check with a number of other samples. 
+ for i in range(10): + samples = bayesNet.sample() + samples.update(measurements) + ratio = self.calculate_ratio(bayesNet, fg, samples) + # print(f"Ratio: {ratio}\n") + if (ratio > 0): + self.assertAlmostEqual(ratio, expected_ratio) + + # Test elimination. + posterior = fg.eliminateSequential() + + # Calculate ratio between Bayes net probability and the factor graph: + expected_ratio = self.calculate_ratio(posterior, fg, values) + # print(f"expected_ratio: {expected_ratio}\n") + + # Check with a number of other samples. + for i in range(10): + samples = posterior.sample() + samples.insert(measurements) + ratio = self.calculate_ratio(posterior, fg, samples) + # print(f"Ratio: {ratio}\n") + if (ratio > 0): + self.assertAlmostEqual(ratio, expected_ratio) + + if __name__ == "__main__": unittest.main() diff --git a/python/gtsam/tests/test_HybridNonlinearFactorGraph.py b/python/gtsam/tests/test_HybridNonlinearFactorGraph.py index 3ac0d5c6f9..ed26a0c813 100644 --- a/python/gtsam/tests/test_HybridNonlinearFactorGraph.py +++ b/python/gtsam/tests/test_HybridNonlinearFactorGraph.py @@ -14,39 +14,38 @@ import unittest -import gtsam import numpy as np from gtsam.symbol_shorthand import C, X from gtsam.utils.test_case import GtsamTestCase +from gtsam import BetweenFactorPoint3, noiseModel, PriorFactorPoint3, Point3 + +import gtsam class TestHybridGaussianFactorGraph(GtsamTestCase): """Unit tests for HybridGaussianFactorGraph.""" - def test_nonlinear_hybrid(self): nlfg = gtsam.HybridNonlinearFactorGraph() dk = gtsam.DiscreteKeys() dk.push_back((10, 2)) - nlfg.add(gtsam.BetweenFactorPoint3(1, 2, gtsam.Point3(1, 2, 3), gtsam.noiseModel.Diagonal.Variances([1, 1, 1]))) - nlfg.add( - gtsam.PriorFactorPoint3(2, gtsam.Point3(1, 2, 3), gtsam.noiseModel.Diagonal.Variances([0.5, 0.5, 0.5]))) + nlfg.push_back(BetweenFactorPoint3(1, 2, Point3( + 1, 2, 3), noiseModel.Diagonal.Variances([1, 1, 1]))) + nlfg.push_back( + PriorFactorPoint3(2, Point3(1, 2, 3), + 
noiseModel.Diagonal.Variances([0.5, 0.5, 0.5]))) nlfg.push_back( gtsam.MixtureFactor([1], dk, [ - gtsam.PriorFactorPoint3(1, gtsam.Point3(0, 0, 0), - gtsam.noiseModel.Unit.Create(3)), - gtsam.PriorFactorPoint3(1, gtsam.Point3(1, 2, 1), - gtsam.noiseModel.Unit.Create(3)) + PriorFactorPoint3(1, Point3(0, 0, 0), + noiseModel.Unit.Create(3)), + PriorFactorPoint3(1, Point3(1, 2, 1), + noiseModel.Unit.Create(3)) ])) - nlfg.add(gtsam.DecisionTreeFactor((10, 2), "1 3")) + nlfg.push_back(gtsam.DecisionTreeFactor((10, 2), "1 3")) values = gtsam.Values() - values.insert_point3(1, gtsam.Point3(0, 0, 0)) - values.insert_point3(2, gtsam.Point3(2, 3, 1)) + values.insert_point3(1, Point3(0, 0, 0)) + values.insert_point3(2, Point3(2, 3, 1)) hfg = nlfg.linearize(values) - o = gtsam.Ordering() - o.push_back(1) - o.push_back(2) - o.push_back(10) - hbn = hfg.eliminateSequential(o) + hbn = hfg.eliminateSequential() hbv = hbn.optimize() self.assertEqual(hbv.atDiscrete(10), 0) diff --git a/python/gtsam/tests/test_HybridValues.py b/python/gtsam/tests/test_HybridValues.py index 63e7c8e7dd..8a54d518ce 100644 --- a/python/gtsam/tests/test_HybridValues.py +++ b/python/gtsam/tests/test_HybridValues.py @@ -1,5 +1,5 @@ """ -GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +GTSAM Copyright 2010-2022, Georgia Tech Research Corporation, Atlanta, Georgia 30332-0415 All Rights Reserved @@ -20,22 +20,23 @@ from gtsam.utils.test_case import GtsamTestCase -class TestHybridGaussianFactorGraph(GtsamTestCase): +class TestHybridValues(GtsamTestCase): """Unit tests for HybridValues.""" def test_basic(self): - """Test contruction and basic methods of hybrid values.""" - + """Test construction and basic methods of hybrid values.""" + hv1 = gtsam.HybridValues() - hv1.insert(X(0), np.ones((3,1))) + hv1.insert(X(0), np.ones((3, 1))) hv1.insert(C(0), 2) hv2 = gtsam.HybridValues() hv2.insert(C(0), 2) - hv2.insert(X(0), np.ones((3,1))) + hv2.insert(X(0), np.ones((3, 1))) 
self.assertEqual(hv1.atDiscrete(C(0)), 2) - self.assertEqual(hv1.at(X(0))[0], np.ones((3,1))[0]) + self.assertEqual(hv1.at(X(0))[0], np.ones((3, 1))[0]) + if __name__ == "__main__": unittest.main() diff --git a/python/gtsam/tests/test_Rot3.py b/python/gtsam/tests/test_Rot3.py index a1ce01ba2c..e1eeb7fe4f 100644 --- a/python/gtsam/tests/test_Rot3.py +++ b/python/gtsam/tests/test_Rot3.py @@ -13,7 +13,7 @@ import numpy as np import gtsam -from gtsam import Rot3 +from gtsam import Point3, Rot3, Unit3 from gtsam.utils.test_case import GtsamTestCase @@ -2032,6 +2032,31 @@ def test_axis_angle_stress_test(self) -> None: angle_deg = np.rad2deg(angle) assert angle_deg < 180 + def test_rotate(self) -> None: + """Test that rotate() works for both Point3 and Unit3.""" + R = Rot3(np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]])) + p = Point3(1., 1., 1.) + u = Unit3(np.array([1, 1, 1])) + actual_p = R.rotate(p) + actual_u = R.rotate(u) + expected_p = Point3(np.array([1, -1, 1])) + expected_u = Unit3(np.array([1, -1, 1])) + np.testing.assert_array_equal(actual_p, expected_p) + np.testing.assert_array_equal(actual_u.point3(), expected_u.point3()) + + def test_unrotate(self) -> None: + """Test that unrotate() after rotate() yields original Point3/Unit3.""" + wRc = Rot3(np.array(R1_R2_pairs[0][0])) + c_p = Point3(1., 1., 1.) 
+ c_u = Unit3(np.array([1, 1, 1])) + w_p = wRc.rotate(c_p) + w_u = wRc.rotate(c_u) + actual_p = wRc.unrotate(w_p) + actual_u = wRc.unrotate(w_u) + + np.testing.assert_almost_equal(actual_p, c_p, decimal=6) + np.testing.assert_almost_equal(actual_u.point3(), c_u.point3(), decimal=6) + if __name__ == "__main__": unittest.main() diff --git a/tests/simulated2D.h b/tests/simulated2D.h index 097dbd3feb..2cd0d0f392 100644 --- a/tests/simulated2D.h +++ b/tests/simulated2D.h @@ -124,9 +124,9 @@ namespace simulated2D { * Unary factor encoding a soft prior on a vector */ template - class GenericPrior: public NoiseModelFactor1 { + class GenericPrior: public NoiseModelFactorN { public: - typedef NoiseModelFactor1 Base; ///< base class + typedef NoiseModelFactorN Base; ///< base class typedef GenericPrior This; typedef boost::shared_ptr > shared_ptr; typedef VALUE Pose; ///< shortcut to Pose type @@ -168,9 +168,9 @@ namespace simulated2D { * Binary factor simulating "odometry" between two Vectors */ template - class GenericOdometry: public NoiseModelFactor2 { + class GenericOdometry: public NoiseModelFactorN { public: - typedef NoiseModelFactor2 Base; ///< base class + typedef NoiseModelFactorN Base; ///< base class typedef GenericOdometry This; typedef boost::shared_ptr > shared_ptr; typedef VALUE Pose; ///< shortcut to Pose type @@ -214,9 +214,9 @@ namespace simulated2D { * Binary factor simulating "measurement" between two Vectors */ template - class GenericMeasurement: public NoiseModelFactor2 { + class GenericMeasurement: public NoiseModelFactorN { public: - typedef NoiseModelFactor2 Base; ///< base class + typedef NoiseModelFactorN Base; ///< base class typedef GenericMeasurement This; typedef boost::shared_ptr > shared_ptr; typedef POSE Pose; ///< shortcut to Pose type diff --git a/tests/simulated2DOriented.h b/tests/simulated2DOriented.h index 924a5fe4e1..086da7cad6 100644 --- a/tests/simulated2DOriented.h +++ b/tests/simulated2DOriented.h @@ -80,13 +80,13 @@ namespace 
simulated2DOriented { /// Unary factor encoding a soft prior on a vector template - struct GenericPosePrior: public NoiseModelFactor1 { + struct GenericPosePrior: public NoiseModelFactorN { Pose2 measured_; ///< measurement /// Create generic pose prior GenericPosePrior(const Pose2& measured, const SharedNoiseModel& model, Key key) : - NoiseModelFactor1(model, key), measured_(measured) { + NoiseModelFactorN(model, key), measured_(measured) { } /// Evaluate error and optionally derivative @@ -101,7 +101,7 @@ namespace simulated2DOriented { * Binary factor simulating "odometry" between two Vectors */ template - struct GenericOdometry: public NoiseModelFactor2 { + struct GenericOdometry: public NoiseModelFactorN { Pose2 measured_; ///< Between measurement for odometry factor typedef GenericOdometry This; @@ -111,7 +111,7 @@ namespace simulated2DOriented { */ GenericOdometry(const Pose2& measured, const SharedNoiseModel& model, Key i1, Key i2) : - NoiseModelFactor2(model, i1, i2), measured_(measured) { + NoiseModelFactorN(model, i1, i2), measured_(measured) { } ~GenericOdometry() override {} diff --git a/tests/simulated3D.h b/tests/simulated3D.h index 3b4afb1063..4a20acd159 100644 --- a/tests/simulated3D.h +++ b/tests/simulated3D.h @@ -68,7 +68,7 @@ Point3 mea(const Point3& x, const Point3& l, /** * A prior factor on a single linear robot pose */ -struct PointPrior3D: public NoiseModelFactor1 { +struct PointPrior3D: public NoiseModelFactorN { Point3 measured_; ///< The prior pose value for the variable attached to this factor @@ -79,7 +79,7 @@ struct PointPrior3D: public NoiseModelFactor1 { * @param key is the key for the pose */ PointPrior3D(const Point3& measured, const SharedNoiseModel& model, Key key) : - NoiseModelFactor1 (model, key), measured_(measured) { + NoiseModelFactorN (model, key), measured_(measured) { } /** @@ -98,7 +98,7 @@ struct PointPrior3D: public NoiseModelFactor1 { /** * Models a linear 3D measurement between 3D points */ -struct 
Simulated3DMeasurement: public NoiseModelFactor2 { +struct Simulated3DMeasurement: public NoiseModelFactorN { Point3 measured_; ///< Linear displacement between a pose and landmark @@ -110,7 +110,7 @@ struct Simulated3DMeasurement: public NoiseModelFactor2 { * @param pointKey is the point key for the landmark */ Simulated3DMeasurement(const Point3& measured, const SharedNoiseModel& model, Key i, Key j) : - NoiseModelFactor2(model, i, j), measured_(measured) {} + NoiseModelFactorN(model, i, j), measured_(measured) {} /** * Error function with optional derivatives diff --git a/tests/smallExample.h b/tests/smallExample.h index ca9a8580fc..7439a5436a 100644 --- a/tests/smallExample.h +++ b/tests/smallExample.h @@ -26,7 +26,6 @@ #include #include #include -#include namespace gtsam { namespace example { @@ -223,10 +222,9 @@ inline Values createValues() { /* ************************************************************************* */ inline VectorValues createVectorValues() { using namespace impl; - VectorValues c = boost::assign::pair_list_of - (_l1_, Vector2(0.0, -1.0)) - (_x1_, Vector2(0.0, 0.0)) - (_x2_, Vector2(1.5, 0.0)); + VectorValues c {{_l1_, Vector2(0.0, -1.0)}, + {_x1_, Vector2(0.0, 0.0)}, + {_x2_, Vector2(1.5, 0.0)}}; return c; } @@ -329,12 +327,12 @@ inline Matrix H(const Point2& v) { 0.0, cos(v.y())).finished(); } -struct UnaryFactor: public gtsam::NoiseModelFactor1 { +struct UnaryFactor: public gtsam::NoiseModelFactorN { Point2 z_; UnaryFactor(const Point2& z, const SharedNoiseModel& model, Key key) : - gtsam::NoiseModelFactor1(model, key), z_(z) { + gtsam::NoiseModelFactorN(model, key), z_(z) { } Vector evaluateError(const Point2& x, boost::optional A = boost::none) const override { @@ -547,9 +545,7 @@ inline GaussianFactorGraph createSingleConstraintGraph() { /* ************************************************************************* */ inline VectorValues createSingleConstraintValues() { using namespace impl; - VectorValues config = 
boost::assign::pair_list_of - (_x_, Vector2(1.0, -1.0)) - (_y_, Vector2(0.2, 0.1)); + VectorValues config{{_x_, Vector2(1.0, -1.0)}, {_y_, Vector2(0.2, 0.1)}}; return config; } @@ -611,10 +607,9 @@ inline GaussianFactorGraph createMultiConstraintGraph() { /* ************************************************************************* */ inline VectorValues createMultiConstraintValues() { using namespace impl; - VectorValues config = boost::assign::pair_list_of - (_x_, Vector2(-2.0, 2.0)) - (_y_, Vector2(-0.1, 0.4)) - (_z_, Vector2(-4.0, 5.0)); + VectorValues config{{_x_, Vector2(-2.0, 2.0)}, + {_y_, Vector2(-0.1, 0.4)}, + {_z_, Vector2(-4.0, 5.0)}}; return config; } diff --git a/tests/testDoglegOptimizer.cpp b/tests/testDoglegOptimizer.cpp index ec41bf6786..61276e89b9 100644 --- a/tests/testDoglegOptimizer.cpp +++ b/tests/testDoglegOptimizer.cpp @@ -29,7 +29,6 @@ #include #include -#include // for 'list_of()' #include #include @@ -127,17 +126,22 @@ TEST(DoglegOptimizer, Iterate) { double Delta = 1.0; for(size_t it=0; it<10; ++it) { - GaussianBayesNet gbn = *fg.linearize(config)->eliminateSequential(); + auto linearized = fg.linearize(config); + // Iterate assumes that linear error = nonlinear error at the linearization point, and this should be true double nonlinearError = fg.error(config); - double linearError = GaussianFactorGraph(gbn).error(config.zeroVectors()); + double linearError = linearized->error(config.zeroVectors()); DOUBLES_EQUAL(nonlinearError, linearError, 1e-5); -// cout << "it " << it << ", Delta = " << Delta << ", error = " << fg->error(*config) << endl; - VectorValues dx_u = gbn.optimizeGradientSearch(); - VectorValues dx_n = gbn.optimize(); - DoglegOptimizerImpl::IterationResult result = DoglegOptimizerImpl::Iterate(Delta, DoglegOptimizerImpl::SEARCH_EACH_ITERATION, dx_u, dx_n, gbn, fg, config, fg.error(config)); + + auto gbn = linearized->eliminateSequential(); + VectorValues dx_u = gbn->optimizeGradientSearch(); + VectorValues dx_n = 
gbn->optimize(); + DoglegOptimizerImpl::IterationResult result = DoglegOptimizerImpl::Iterate( + Delta, DoglegOptimizerImpl::SEARCH_EACH_ITERATION, dx_u, dx_n, *gbn, fg, + config, fg.error(config)); Delta = result.delta; EXPECT(result.f_error < fg.error(config)); // Check that error decreases + Values newConfig(config.retract(result.dx_d)); config = newConfig; DOUBLES_EQUAL(fg.error(config), result.f_error, 1e-5); // Check that error is correctly filled in diff --git a/tests/testExpressionFactor.cpp b/tests/testExpressionFactor.cpp index 5b27eea4db..bb35ada199 100644 --- a/tests/testExpressionFactor.cpp +++ b/tests/testExpressionFactor.cpp @@ -27,8 +27,6 @@ #include #include -#include -using boost::assign::list_of; using namespace std::placeholders; using namespace std; @@ -507,7 +505,7 @@ TEST(Expression, testMultipleCompositions) { // Leaf, key = 1 // Leaf, key = 2 Expression sum1_(Combine(1, 2), v1_, v2_); - EXPECT(sum1_.keys() == list_of(1)(2)); + EXPECT((sum1_.keys() == std::set{1, 2})); EXPECT_CORRECT_EXPRESSION_JACOBIANS(sum1_, values, fd_step, tolerance); // BinaryExpression(3,4) @@ -516,7 +514,7 @@ TEST(Expression, testMultipleCompositions) { // Leaf, key = 2 // Leaf, key = 1 Expression sum2_(Combine(3, 4), sum1_, v1_); - EXPECT(sum2_.keys() == list_of(1)(2)); + EXPECT((sum2_.keys() == std::set{1, 2})); EXPECT_CORRECT_EXPRESSION_JACOBIANS(sum2_, values, fd_step, tolerance); // BinaryExpression(5,6) @@ -529,7 +527,7 @@ TEST(Expression, testMultipleCompositions) { // Leaf, key = 1 // Leaf, key = 2 Expression sum3_(Combine(5, 6), sum1_, sum2_); - EXPECT(sum3_.keys() == list_of(1)(2)); + EXPECT((sum3_.keys() == std::set{1, 2})); EXPECT_CORRECT_EXPRESSION_JACOBIANS(sum3_, values, fd_step, tolerance); } @@ -558,19 +556,19 @@ TEST(Expression, testMultipleCompositions2) { Expression v3_(Key(3)); Expression sum1_(Combine(4,5), v1_, v2_); - EXPECT(sum1_.keys() == list_of(1)(2)); + EXPECT((sum1_.keys() == std::set{1, 2})); EXPECT_CORRECT_EXPRESSION_JACOBIANS(sum1_, 
values, fd_step, tolerance); Expression sum2_(combine3, v1_, v2_, v3_); - EXPECT(sum2_.keys() == list_of(1)(2)(3)); + EXPECT((sum2_.keys() == std::set{1, 2, 3})); EXPECT_CORRECT_EXPRESSION_JACOBIANS(sum2_, values, fd_step, tolerance); Expression sum3_(combine3, v3_, v2_, v1_); - EXPECT(sum3_.keys() == list_of(1)(2)(3)); + EXPECT((sum3_.keys() == std::set{1, 2, 3})); EXPECT_CORRECT_EXPRESSION_JACOBIANS(sum3_, values, fd_step, tolerance); Expression sum4_(combine3, sum1_, sum2_, sum3_); - EXPECT(sum4_.keys() == list_of(1)(2)(3)); + EXPECT((sum4_.keys() == std::set{1, 2, 3})); EXPECT_CORRECT_EXPRESSION_JACOBIANS(sum4_, values, fd_step, tolerance); } @@ -731,6 +729,19 @@ TEST(ExpressionFactor, variadicTemplate) { EXPECT_CORRECT_FACTOR_JACOBIANS(f, values, 1e-8, 1e-5); } +TEST(ExpressionFactor, normalize) { + auto model = noiseModel::Isotropic::Sigma(3, 1); + + // Create expression + const auto x = Vector3_(1); + Vector3_ f_expr = normalize(x); + + // Check derivatives + Values values; + values.insert(1, Vector3(1, 2, 3)); + ExpressionFactor factor(model, Vector3(1.0/sqrt(14), 2.0/sqrt(14), 3.0/sqrt(14)), f_expr); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-5, 1e-5); +} TEST(ExpressionFactor, crossProduct) { auto model = noiseModel::Isotropic::Sigma(3, 1); diff --git a/tests/testExtendedKalmanFilter.cpp b/tests/testExtendedKalmanFilter.cpp index f0c1b4b703..88da7006d1 100644 --- a/tests/testExtendedKalmanFilter.cpp +++ b/tests/testExtendedKalmanFilter.cpp @@ -91,9 +91,9 @@ TEST( ExtendedKalmanFilter, linear ) { // Create Motion Model Factor -class NonlinearMotionModel : public NoiseModelFactor2 { +class NonlinearMotionModel : public NoiseModelFactorN { - typedef NoiseModelFactor2 Base; + typedef NoiseModelFactorN Base; typedef NonlinearMotionModel This; protected: @@ -155,14 +155,14 @@ class NonlinearMotionModel : public NoiseModelFactor2 { /* print */ void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { 
std::cout << s << ": NonlinearMotionModel\n"; - std::cout << " TestKey1: " << keyFormatter(key1()) << std::endl; - std::cout << " TestKey2: " << keyFormatter(key2()) << std::endl; + std::cout << " TestKey1: " << keyFormatter(key<1>()) << std::endl; + std::cout << " TestKey2: " << keyFormatter(key<2>()) << std::endl; } /** Check if two factors are equal. Note type is IndexFactor and needs cast. */ bool equals(const NonlinearFactor& f, double tol = 1e-9) const override { const This *e = dynamic_cast (&f); - return (e != nullptr) && (key1() == e->key1()) && (key2() == e->key2()); + return (e != nullptr) && (key<1>() == e->key<1>()) && (key<2>() == e->key<2>()); } /** @@ -181,7 +181,7 @@ class NonlinearMotionModel : public NoiseModelFactor2 { /** Vector of errors, whitened ! */ Vector whitenedError(const Values& c) const { - return QInvSqrt(c.at(key1()))*unwhitenedError(c); + return QInvSqrt(c.at(key<1>()))*unwhitenedError(c); } /** @@ -190,14 +190,16 @@ class NonlinearMotionModel : public NoiseModelFactor2 { * Hence b = z - h(x1,x2) = - error_vector(x) */ boost::shared_ptr linearize(const Values& c) const override { - const X1& x1 = c.at(key1()); - const X2& x2 = c.at(key2()); + using X1 = ValueType<1>; + using X2 = ValueType<2>; + const X1& x1 = c.at(key<1>()); + const X2& x2 = c.at(key<2>()); Matrix A1, A2; Vector b = -evaluateError(x1, x2, A1, A2); SharedDiagonal constrained = boost::dynamic_pointer_cast(this->noiseModel_); if (constrained.get() != nullptr) { - return JacobianFactor::shared_ptr(new JacobianFactor(key1(), A1, key2(), + return JacobianFactor::shared_ptr(new JacobianFactor(key<1>(), A1, key<2>(), A2, b, constrained)); } // "Whiten" the system before converting to a Gaussian Factor @@ -205,7 +207,7 @@ class NonlinearMotionModel : public NoiseModelFactor2 { A1 = Qinvsqrt*A1; A2 = Qinvsqrt*A2; b = Qinvsqrt*b; - return GaussianFactor::shared_ptr(new JacobianFactor(key1(), A1, key2(), + return GaussianFactor::shared_ptr(new JacobianFactor(key<1>(), A1, 
key<2>(), A2, b, noiseModel::Unit::Create(b.size()))); } @@ -232,9 +234,9 @@ class NonlinearMotionModel : public NoiseModelFactor2 { }; // Create Measurement Model Factor -class NonlinearMeasurementModel : public NoiseModelFactor1 { +class NonlinearMeasurementModel : public NoiseModelFactorN { - typedef NoiseModelFactor1 Base; + typedef NoiseModelFactorN Base; typedef NonlinearMeasurementModel This; protected: @@ -320,6 +322,7 @@ class NonlinearMeasurementModel : public NoiseModelFactor1 { * Hence b = z - h(x1) = - error_vector(x) */ boost::shared_ptr linearize(const Values& c) const override { + using X = ValueType<1>; const X& x1 = c.at(key()); Matrix A1; Vector b = -evaluateError(x1, A1); diff --git a/tests/testGaussianBayesTreeB.cpp b/tests/testGaussianBayesTreeB.cpp index a321aa25de..fedc759454 100644 --- a/tests/testGaussianBayesTreeB.cpp +++ b/tests/testGaussianBayesTreeB.cpp @@ -26,9 +26,6 @@ #include -#include // for operator += -using namespace boost::assign; - using namespace std; using namespace gtsam; using namespace example; @@ -241,10 +238,10 @@ TEST( GaussianBayesTree, balanced_smoother_joint ) const Matrix I = I_2x2, A = -0.00429185*I; // Check the joint density P(x1,x7) factored as P(x1|x7)P(x7) - GaussianBayesNet expected1 = list_of + GaussianBayesNet expected1; // Why does the sign get flipped on the prior? - (GaussianConditional(X(1), Z_2x1, I/sigmax7, X(7), A/sigmax7)) - (GaussianConditional(X(7), Z_2x1, -1*I/sigmax7)); + expected1.emplace_shared(X(1), Z_2x1, I/sigmax7, X(7), A/sigmax7); + expected1.emplace_shared(X(7), Z_2x1, -1*I/sigmax7); GaussianBayesNet actual1 = *bayesTree.jointBayesNet(X(1),X(7)); EXPECT(assert_equal(expected1, actual1, tol)); @@ -260,9 +257,9 @@ TEST( GaussianBayesTree, balanced_smoother_joint ) // Check the joint density P(x1,x4), i.e. 
with a root variable double sig14 = 0.784465; Matrix A14 = -0.0769231*I; - GaussianBayesNet expected3 = list_of - (GaussianConditional(X(1), Z_2x1, I/sig14, X(4), A14/sig14)) - (GaussianConditional(X(4), Z_2x1, I/sigmax4)); + GaussianBayesNet expected3; + expected3.emplace_shared(X(1), Z_2x1, I/sig14, X(4), A14/sig14); + expected3.emplace_shared(X(4), Z_2x1, I/sigmax4); GaussianBayesNet actual3 = *bayesTree.jointBayesNet(X(1),X(4)); EXPECT(assert_equal(expected3,actual3,tol)); diff --git a/tests/testGaussianFactorGraphB.cpp b/tests/testGaussianFactorGraphB.cpp index 1f5ec63506..a6aa7bfc56 100644 --- a/tests/testGaussianFactorGraphB.cpp +++ b/tests/testGaussianFactorGraphB.cpp @@ -27,10 +27,6 @@ #include #include -#include // for operator += -#include // for operator += -#include // for operator += -using namespace boost::assign; #include namespace br { using namespace boost::range; using namespace boost::adaptors; } @@ -73,7 +69,7 @@ TEST(GaussianFactorGraph, eliminateOne_x1) { GaussianFactorGraph fg = createGaussianFactorGraph(); GaussianConditional::shared_ptr conditional; - auto result = fg.eliminatePartialSequential(Ordering(list_of(X(1)))); + auto result = fg.eliminatePartialSequential(Ordering{X(1)}); conditional = result.first->front(); // create expected Conditional Gaussian @@ -89,7 +85,7 @@ TEST(GaussianFactorGraph, eliminateOne_x2) { Ordering ordering; ordering += X(2), L(1), X(1); GaussianFactorGraph fg = createGaussianFactorGraph(); - auto actual = EliminateQR(fg, Ordering(list_of(X(2)))).first; + auto actual = EliminateQR(fg, Ordering{X(2)}).first; // create expected Conditional Gaussian double sigma = 0.0894427; @@ -105,7 +101,7 @@ TEST(GaussianFactorGraph, eliminateOne_l1) { Ordering ordering; ordering += L(1), X(1), X(2); GaussianFactorGraph fg = createGaussianFactorGraph(); - auto actual = EliminateQR(fg, Ordering(list_of(L(1)))).first; + auto actual = EliminateQR(fg, Ordering{L(1)}).first; // create expected Conditional Gaussian double sigma = 
sqrt(2.0) / 10.; @@ -121,7 +117,7 @@ TEST(GaussianFactorGraph, eliminateOne_x1_fast) { GaussianFactorGraph fg = createGaussianFactorGraph(); GaussianConditional::shared_ptr conditional; JacobianFactor::shared_ptr remaining; - boost::tie(conditional, remaining) = EliminateQR(fg, Ordering(list_of(X(1)))); + boost::tie(conditional, remaining) = EliminateQR(fg, Ordering{X(1)}); // create expected Conditional Gaussian Matrix I = 15 * I_2x2, R11 = I, S12 = -0.111111 * I, S13 = -0.444444 * I; @@ -144,7 +140,7 @@ TEST(GaussianFactorGraph, eliminateOne_x1_fast) { /* ************************************************************************* */ TEST(GaussianFactorGraph, eliminateOne_x2_fast) { GaussianFactorGraph fg = createGaussianFactorGraph(); - auto actual = EliminateQR(fg, Ordering(list_of(X(2)))).first; + auto actual = EliminateQR(fg, Ordering{X(2)}).first; // create expected Conditional Gaussian double sigma = 0.0894427; @@ -158,7 +154,7 @@ TEST(GaussianFactorGraph, eliminateOne_x2_fast) { /* ************************************************************************* */ TEST(GaussianFactorGraph, eliminateOne_l1_fast) { GaussianFactorGraph fg = createGaussianFactorGraph(); - auto actual = EliminateQR(fg, Ordering(list_of(L(1)))).first; + auto actual = EliminateQR(fg, Ordering{L(1)}).first; // create expected Conditional Gaussian double sigma = sqrt(2.0) / 10.; @@ -272,10 +268,10 @@ TEST(GaussianFactorGraph, multiplication) { VectorValues x = createCorrectDelta(); Errors actual = A * x; Errors expected; - expected += Vector2(-1.0, -1.0); - expected += Vector2(2.0, -1.0); - expected += Vector2(0.0, 1.0); - expected += Vector2(-1.0, 1.5); + expected.push_back(Vector2(-1.0, -1.0)); + expected.push_back(Vector2(2.0, -1.0)); + expected.push_back(Vector2(0.0, 1.0)); + expected.push_back(Vector2(-1.0, 1.5)); EXPECT(assert_equal(expected, actual)); } @@ -287,9 +283,9 @@ TEST(GaussianFactorGraph, elimination) { Matrix Ap = I_1x1, An = I_1x1 * -1; Vector b = (Vector(1) << 
0.0).finished(); SharedDiagonal sigma = noiseModel::Isotropic::Sigma(1, 2.0); - fg += JacobianFactor(X(1), An, X(2), Ap, b, sigma); - fg += JacobianFactor(X(1), Ap, b, sigma); - fg += JacobianFactor(X(2), Ap, b, sigma); + fg.emplace_shared(X(1), An, X(2), Ap, b, sigma); + fg.emplace_shared(X(1), Ap, b, sigma); + fg.emplace_shared(X(2), Ap, b, sigma); // Eliminate Ordering ordering; diff --git a/tests/testGaussianISAM.cpp b/tests/testGaussianISAM.cpp index 309f0e1e08..1c1ce90d51 100644 --- a/tests/testGaussianISAM.cpp +++ b/tests/testGaussianISAM.cpp @@ -22,8 +22,6 @@ #include #include -#include // for operator += -using namespace boost::assign; #include namespace br { using namespace boost::adaptors; using namespace boost::range; } diff --git a/tests/testGaussianISAM2.cpp b/tests/testGaussianISAM2.cpp index e8e916f046..8dbf3fff6d 100644 --- a/tests/testGaussianISAM2.cpp +++ b/tests/testGaussianISAM2.cpp @@ -24,9 +24,7 @@ #include -#include #include -using namespace boost::assign; namespace br { using namespace boost::adaptors; using namespace boost::range; } using namespace std; @@ -686,7 +684,7 @@ TEST(ISAM2, marginalizeLeaves1) { isam.update(factors, values, FactorIndices(), constrainedKeys); - FastList leafKeys = list_of(0); + FastList leafKeys {0}; EXPECT(checkMarginalizeLeaves(isam, leafKeys)); } @@ -716,7 +714,7 @@ TEST(ISAM2, marginalizeLeaves2) { isam.update(factors, values, FactorIndices(), constrainedKeys); - FastList leafKeys = list_of(0); + FastList leafKeys {0}; EXPECT(checkMarginalizeLeaves(isam, leafKeys)); } @@ -755,7 +753,7 @@ TEST(ISAM2, marginalizeLeaves3) { isam.update(factors, values, FactorIndices(), constrainedKeys); - FastList leafKeys = list_of(0); + FastList leafKeys {0}; EXPECT(checkMarginalizeLeaves(isam, leafKeys)); } @@ -780,7 +778,7 @@ TEST(ISAM2, marginalizeLeaves4) { isam.update(factors, values, FactorIndices(), constrainedKeys); - FastList leafKeys = list_of(1); + FastList leafKeys {1}; EXPECT(checkMarginalizeLeaves(isam, 
leafKeys)); } @@ -791,7 +789,7 @@ TEST(ISAM2, marginalizeLeaves5) ISAM2 isam = createSlamlikeISAM2(); // Marginalize - FastList marginalizeKeys = list_of(0); + FastList marginalizeKeys {0}; EXPECT(checkMarginalizeLeaves(isam, marginalizeKeys)); } diff --git a/tests/testGaussianJunctionTreeB.cpp b/tests/testGaussianJunctionTreeB.cpp index dfdb32b468..681554645e 100644 --- a/tests/testGaussianJunctionTreeB.cpp +++ b/tests/testGaussianJunctionTreeB.cpp @@ -42,17 +42,11 @@ #include -#include -#include -#include - #include #include #include #include -using namespace boost::assign; - #include using namespace std; @@ -79,8 +73,7 @@ TEST( GaussianJunctionTreeB, constructor2 ) { // linearize GaussianFactorGraph::shared_ptr fg = nlfg.linearize(values); - Ordering ordering; - ordering += X(1), X(3), X(5), X(7), X(2), X(6), X(4); + const Ordering ordering {X(1), X(3), X(5), X(7), X(2), X(6), X(4)}; // create an ordering GaussianEliminationTree etree(*fg, ordering); diff --git a/tests/testGraph.cpp b/tests/testGraph.cpp index ad7cabb993..64c86c4c4c 100644 --- a/tests/testGraph.cpp +++ b/tests/testGraph.cpp @@ -27,8 +27,6 @@ #include #include -#include // for operator += -using namespace boost::assign; #include @@ -47,8 +45,7 @@ TEST ( Ordering, predecessorMap2Keys ) { p_map.insert(4,3); p_map.insert(5,1); - list expected; - expected += 4,5,3,2,1; + list expected{4, 5, 3, 2, 1}; list actual = predecessorMap2Keys(p_map); LONGS_EQUAL((long)expected.size(), (long)actual.size()); diff --git a/tests/testManifold.cpp b/tests/testManifold.cpp index a05c4b621a..7d788d1099 100644 --- a/tests/testManifold.cpp +++ b/tests/testManifold.cpp @@ -29,10 +29,6 @@ #undef CHECK #include -#include -using boost::assign::list_of; -using boost::assign::map_list_of; - using namespace std; using namespace gtsam; diff --git a/tests/testNonlinearFactor.cpp b/tests/testNonlinearFactor.cpp index 67a23355d6..c823614502 100644 --- a/tests/testNonlinearFactor.cpp +++ b/tests/testNonlinearFactor.cpp @@ -329,11 
+329,68 @@ TEST( NonlinearFactor, cloneWithNewNoiseModel ) CHECK(assert_equal(expected, actual)); } +/* ************************************************************************* */ +class TestFactor1 : public NoiseModelFactor1 { + static_assert(std::is_same::value, "Base type wrong"); + static_assert(std::is_same>::value, + "This type wrong"); + + public: + typedef NoiseModelFactor1 Base; + TestFactor1() : Base(noiseModel::Diagonal::Sigmas(Vector1(2.0)), L(1)) {} + using Base::NoiseModelFactor1; // inherit constructors + + Vector evaluateError(const double& x1, boost::optional H1 = + boost::none) const override { + if (H1) *H1 = (Matrix(1, 1) << 1.0).finished(); + return (Vector(1) << x1).finished(); + } + + gtsam::NonlinearFactor::shared_ptr clone() const override { + return boost::static_pointer_cast( + gtsam::NonlinearFactor::shared_ptr(new TestFactor1(*this))); + } +}; + +/* ************************************ */ +TEST(NonlinearFactor, NoiseModelFactor1) { + TestFactor1 tf; + Values tv; + tv.insert(L(1), double((1.0))); + EXPECT(assert_equal((Vector(1) << 1.0).finished(), tf.unwhitenedError(tv))); + DOUBLES_EQUAL(0.25 / 2.0, tf.error(tv), 1e-9); + JacobianFactor jf( + *boost::dynamic_pointer_cast(tf.linearize(tv))); + LONGS_EQUAL((long)L(1), (long)jf.keys()[0]); + EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 0.5).finished(), + jf.getA(jf.begin()))); + EXPECT(assert_equal((Vector)(Vector(1) << -0.5).finished(), jf.getb())); + + // Test all functions/types for backwards compatibility + static_assert(std::is_same::value, + "X type incorrect"); + EXPECT(assert_equal(tf.key(), L(1))); + std::vector H = {Matrix()}; + EXPECT(assert_equal(Vector1(1.0), tf.unwhitenedError(tv, H))); + + // Test constructors + TestFactor1 tf2(noiseModel::Unit::Create(1), L(1)); + TestFactor1 tf3(noiseModel::Unit::Create(1), {L(1)}); + TestFactor1 tf4(noiseModel::Unit::Create(1), gtsam::Symbol('L', 1)); +} + /* ************************************************************************* */ 
class TestFactor4 : public NoiseModelFactor4 { -public: + static_assert(std::is_same::value, "Base type wrong"); + static_assert( + std::is_same>::value, + "This type wrong"); + + public: typedef NoiseModelFactor4 Base; TestFactor4() : Base(noiseModel::Diagonal::Sigmas((Vector(1) << 2.0).finished()), X(1), X(2), X(3), X(4)) {} + using Base::NoiseModelFactor4; // inherit constructors Vector evaluateError(const double& x1, const double& x2, const double& x3, const double& x4, @@ -347,7 +404,7 @@ class TestFactor4 : public NoiseModelFactor4 { *H3 = (Matrix(1, 1) << 3.0).finished(); *H4 = (Matrix(1, 1) << 4.0).finished(); } - return (Vector(1) << x1 + x2 + x3 + x4).finished(); + return (Vector(1) << x1 + 2.0 * x2 + 3.0 * x3 + 4.0 * x4).finished(); } gtsam::NonlinearFactor::shared_ptr clone() const override { @@ -363,8 +420,8 @@ TEST(NonlinearFactor, NoiseModelFactor4) { tv.insert(X(2), double((2.0))); tv.insert(X(3), double((3.0))); tv.insert(X(4), double((4.0))); - EXPECT(assert_equal((Vector(1) << 10.0).finished(), tf.unwhitenedError(tv))); - DOUBLES_EQUAL(25.0/2.0, tf.error(tv), 1e-9); + EXPECT(assert_equal((Vector(1) << 30.0).finished(), tf.unwhitenedError(tv))); + DOUBLES_EQUAL(0.5 * 30.0 * 30.0 / 4.0, tf.error(tv), 1e-9); JacobianFactor jf(*boost::dynamic_pointer_cast(tf.linearize(tv))); LONGS_EQUAL((long)X(1), (long)jf.keys()[0]); LONGS_EQUAL((long)X(2), (long)jf.keys()[1]); @@ -374,7 +431,49 @@ TEST(NonlinearFactor, NoiseModelFactor4) { EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 1.0).finished(), jf.getA(jf.begin()+1))); EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 1.5).finished(), jf.getA(jf.begin()+2))); EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 2.0).finished(), jf.getA(jf.begin()+3))); - EXPECT(assert_equal((Vector)(Vector(1) << -5.0).finished(), jf.getb())); + EXPECT(assert_equal((Vector)(Vector(1) << 0.5 * -30.).finished(), jf.getb())); + + // Test all functions/types for backwards compatibility + static_assert(std::is_same::value, + "X1 type 
incorrect"); + static_assert(std::is_same::value, + "X2 type incorrect"); + static_assert(std::is_same::value, + "X3 type incorrect"); + static_assert(std::is_same::value, + "X4 type incorrect"); + EXPECT(assert_equal(tf.key1(), X(1))); + EXPECT(assert_equal(tf.key2(), X(2))); + EXPECT(assert_equal(tf.key3(), X(3))); + EXPECT(assert_equal(tf.key4(), X(4))); + std::vector H = {Matrix(), Matrix(), Matrix(), Matrix()}; + EXPECT(assert_equal(Vector1(30.0), tf.unwhitenedError(tv, H))); + EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 1.).finished(), H.at(0))); + EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 2.).finished(), H.at(1))); + EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 3.).finished(), H.at(2))); + EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 4.).finished(), H.at(3))); + + // And test "forward compatibility" using `key` and `ValueType` too + static_assert(std::is_same, double>::value, + "ValueType<1> type incorrect"); + static_assert(std::is_same, double>::value, + "ValueType<2> type incorrect"); + static_assert(std::is_same, double>::value, + "ValueType<3> type incorrect"); + static_assert(std::is_same, double>::value, + "ValueType<4> type incorrect"); + EXPECT(assert_equal(tf.key<1>(), X(1))); + EXPECT(assert_equal(tf.key<2>(), X(2))); + EXPECT(assert_equal(tf.key<3>(), X(3))); + EXPECT(assert_equal(tf.key<4>(), X(4))); + + // Test constructors + TestFactor4 tf2(noiseModel::Unit::Create(1), L(1), L(2), L(3), L(4)); + TestFactor4 tf3(noiseModel::Unit::Create(1), {L(1), L(2), L(3), L(4)}); + TestFactor4 tf4(noiseModel::Unit::Create(1), + std::array{L(1), L(2), L(3), L(4)}); + std::vector keys = {L(1), L(2), L(3), L(4)}; + TestFactor4 tf5(noiseModel::Unit::Create(1), keys); } /* ************************************************************************* */ @@ -397,7 +496,8 @@ class TestFactor5 : public NoiseModelFactor5(tf.linearize(tv))); LONGS_EQUAL((long)X(1), (long)jf.keys()[0]); LONGS_EQUAL((long)X(2), (long)jf.keys()[1]); @@ -423,7 +523,7 @@ 
TEST(NonlinearFactor, NoiseModelFactor5) { EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 1.5).finished(), jf.getA(jf.begin()+2))); EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 2.0).finished(), jf.getA(jf.begin()+3))); EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 2.5).finished(), jf.getA(jf.begin()+4))); - EXPECT(assert_equal((Vector)(Vector(1) << -7.5).finished(), jf.getb())); + EXPECT(assert_equal((Vector)(Vector(1) << 0.5 * -55.).finished(), jf.getb())); } /* ************************************************************************* */ @@ -448,7 +548,9 @@ class TestFactor6 : public NoiseModelFactor6(tf.linearize(tv))); LONGS_EQUAL((long)X(1), (long)jf.keys()[0]); LONGS_EQUAL((long)X(2), (long)jf.keys()[1]); @@ -478,8 +580,105 @@ TEST(NonlinearFactor, NoiseModelFactor6) { EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 2.0).finished(), jf.getA(jf.begin()+3))); EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 2.5).finished(), jf.getA(jf.begin()+4))); EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 3.0).finished(), jf.getA(jf.begin()+5))); - EXPECT(assert_equal((Vector)(Vector(1) << -10.5).finished(), jf.getb())); + EXPECT(assert_equal((Vector)(Vector(1) << 0.5 * -91.).finished(), jf.getb())); + +} +/* ************************************************************************* */ +class TestFactorN : public NoiseModelFactorN { +public: + typedef NoiseModelFactorN Base; + using Type1 = ValueType<1>; // Test that we can use the ValueType<> template + + TestFactorN() : Base(noiseModel::Diagonal::Sigmas((Vector(1) << 2.0).finished()), X(1), X(2), X(3), X(4)) {} + + Vector + evaluateError(const double& x1, const double& x2, const double& x3, const double& x4, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none, + boost::optional H3 = boost::none, + boost::optional H4 = boost::none) const override { + if (H1) *H1 = (Matrix(1, 1) << 1.0).finished(); + if (H2) *H2 = (Matrix(1, 1) << 2.0).finished(); + if (H3) *H3 = (Matrix(1, 1) << 3.0).finished(); + if (H4) *H4 = 
(Matrix(1, 1) << 4.0).finished(); + return (Vector(1) << x1 + 2.0 * x2 + 3.0 * x3 + 4.0 * x4).finished(); + } + + Key key1() const { return key<1>(); } // Test that we can use key<> template +}; + +/* ************************************ */ +TEST(NonlinearFactor, NoiseModelFactorN) { + TestFactorN tf; + Values tv; + tv.insert(X(1), double((1.0))); + tv.insert(X(2), double((2.0))); + tv.insert(X(3), double((3.0))); + tv.insert(X(4), double((4.0))); + EXPECT(assert_equal((Vector(1) << 30.0).finished(), tf.unwhitenedError(tv))); + DOUBLES_EQUAL(0.5 * 30.0 * 30.0 / 4.0, tf.error(tv), 1e-9); + JacobianFactor jf(*boost::dynamic_pointer_cast(tf.linearize(tv))); + LONGS_EQUAL((long)X(1), (long)jf.keys()[0]); + LONGS_EQUAL((long)X(2), (long)jf.keys()[1]); + LONGS_EQUAL((long)X(3), (long)jf.keys()[2]); + LONGS_EQUAL((long)X(4), (long)jf.keys()[3]); + EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 0.5).finished(), jf.getA(jf.begin()))); + EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 1.0).finished(), jf.getA(jf.begin()+1))); + EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 1.5).finished(), jf.getA(jf.begin()+2))); + EXPECT(assert_equal((Matrix)(Matrix(1, 1) << 2.0).finished(), jf.getA(jf.begin()+3))); + EXPECT(assert_equal((Vector)(Vector(1) << -0.5 * 30.).finished(), jf.getb())); + + // Test all evaluateError argument overloads to ensure backward compatibility + Matrix H1_expected, H2_expected, H3_expected, H4_expected; + Vector e_expected = tf.evaluateError(9, 8, 7, 6, H1_expected, H2_expected, + H3_expected, H4_expected); + + std::unique_ptr> base_ptr( + new TestFactorN(tf)); + Matrix H1, H2, H3, H4; + EXPECT(assert_equal(e_expected, base_ptr->evaluateError(9, 8, 7, 6))); + EXPECT(assert_equal(e_expected, base_ptr->evaluateError(9, 8, 7, 6, H1))); + EXPECT(assert_equal(H1_expected, H1)); + EXPECT(assert_equal(e_expected, // + base_ptr->evaluateError(9, 8, 7, 6, H1, H2))); + EXPECT(assert_equal(H1_expected, H1)); + EXPECT(assert_equal(H2_expected, H2)); + 
EXPECT(assert_equal(e_expected, + base_ptr->evaluateError(9, 8, 7, 6, H1, H2, H3))); + EXPECT(assert_equal(H1_expected, H1)); + EXPECT(assert_equal(H2_expected, H2)); + EXPECT(assert_equal(H3_expected, H3)); + EXPECT(assert_equal(e_expected, + base_ptr->evaluateError(9, 8, 7, 6, H1, H2, H3, H4))); + EXPECT(assert_equal(H1_expected, H1)); + EXPECT(assert_equal(H2_expected, H2)); + EXPECT(assert_equal(H3_expected, H3)); + EXPECT(assert_equal(H4_expected, H4)); + + // Test all functions/types for backwards compatibility + + static_assert(std::is_same::value, + "X1 type incorrect"); + EXPECT(assert_equal(tf.key3(), X(3))); + + + // Test using `key` and `ValueType` + static_assert(std::is_same, double>::value, + "ValueType<1> type incorrect"); + static_assert(std::is_same, double>::value, + "ValueType<2> type incorrect"); + static_assert(std::is_same, double>::value, + "ValueType<3> type incorrect"); + static_assert(std::is_same, double>::value, + "ValueType<4> type incorrect"); + static_assert(std::is_same::value, + "TestFactorN::Type1 type incorrect"); + EXPECT(assert_equal(tf.key<1>(), X(1))); + EXPECT(assert_equal(tf.key<2>(), X(2))); + EXPECT(assert_equal(tf.key<3>(), X(3))); + EXPECT(assert_equal(tf.key<4>(), X(4))); + EXPECT(assert_equal(tf.key1(), X(1))); } /* ************************************************************************* */ diff --git a/tests/testNonlinearFactorGraph.cpp b/tests/testNonlinearFactorGraph.cpp index e1a88d6169..136cd064f2 100644 --- a/tests/testNonlinearFactorGraph.cpp +++ b/tests/testNonlinearFactorGraph.cpp @@ -32,10 +32,6 @@ #include -#include -#include -using namespace boost::assign; - /*STL/C++*/ #include diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 295721cc42..cc82892f73 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -33,8 +33,6 @@ #include #include -#include // for operator += -using namespace boost::assign; using boost::adaptors::map_values; 
#include diff --git a/tests/testPCGSolver.cpp b/tests/testPCGSolver.cpp index 9d6cc49ac4..558f7c1e48 100644 --- a/tests/testPCGSolver.cpp +++ b/tests/testPCGSolver.cpp @@ -27,8 +27,6 @@ #include #include -#include // for operator += -using namespace boost::assign; #include #include diff --git a/tests/testSubgraphPreconditioner.cpp b/tests/testSubgraphPreconditioner.cpp index c5b4e42ecf..8abd547951 100644 --- a/tests/testSubgraphPreconditioner.cpp +++ b/tests/testSubgraphPreconditioner.cpp @@ -29,10 +29,8 @@ #include -#include #include #include -using namespace boost::assign; #include @@ -170,8 +168,8 @@ TEST(SubgraphPreconditioner, system) { const double alpha = 0.5; Errors e1, e2; for (size_t i = 0; i < 13; i++) { - e1 += i < 9 ? Vector2(1, 1) : Vector2(0, 0); - e2 += i >= 9 ? Vector2(1, 1) : Vector2(0, 0); + e1.push_back(i < 9 ? Vector2(1, 1) : Vector2(0, 0)); + e2.push_back(i >= 9 ? Vector2(1, 1) : Vector2(0, 0)); } Vector ee1(13 * 2), ee2(13 * 2); ee1 << Vector::Ones(9 * 2), Vector::Zero(4 * 2); diff --git a/tests/testSubgraphSolver.cpp b/tests/testSubgraphSolver.cpp index 5d8d88775b..69b5fe5f93 100644 --- a/tests/testSubgraphSolver.cpp +++ b/tests/testSubgraphSolver.cpp @@ -28,9 +28,6 @@ #include -#include -using namespace boost::assign; - using namespace std; using namespace gtsam; diff --git a/timing/timeCameraExpression.cpp b/timing/timeCameraExpression.cpp index 07eeb1bcba..5092949992 100644 --- a/timing/timeCameraExpression.cpp +++ b/timing/timeCameraExpression.cpp @@ -100,7 +100,6 @@ int main() { // Oct 3, 2014, Macbook Air // 9.0 musecs/call typedef PinholeCamera Camera; - typedef Expression Camera_; NonlinearFactor::shared_ptr g3 = boost::make_shared >(model, z, Point2_(myProject, x, p)); diff --git a/timing/timeGaussianFactor.cpp b/timing/timeGaussianFactor.cpp index 3aecac3c11..feb738439a 100644 --- a/timing/timeGaussianFactor.cpp +++ b/timing/timeGaussianFactor.cpp @@ -22,7 +22,6 @@ using namespace std; #include -#include #include #include @@ -31,7 
+30,6 @@ using namespace std; #include using namespace gtsam; -using namespace boost::assign; static const Key _x1_=1, _x2_=2, _l1_=3; @@ -109,7 +107,7 @@ int main() for(int i = 0; i < n; i++) boost::tie(conditional, factor) = - JacobianFactor(combined).eliminate(Ordering(boost::assign::list_of(_x2_))); + JacobianFactor(combined).eliminate(Ordering{_x2_}); long timeLog2 = clock(); double seconds = (double)(timeLog2-timeLog)/CLOCKS_PER_SEC; diff --git a/timing/timeGaussianFactorGraph.cpp b/timing/timeGaussianFactorGraph.cpp index 2c1e115868..574579f84b 100644 --- a/timing/timeGaussianFactorGraph.cpp +++ b/timing/timeGaussianFactorGraph.cpp @@ -16,14 +16,12 @@ */ #include -#include // for operator += in Ordering #include #include using namespace std; using namespace gtsam; using namespace example; -using namespace boost::assign; /* ************************************************************************* */ // Create a Kalman smoother for t=1:T and optimize diff --git a/timing/timeIncremental.cpp b/timing/timeIncremental.cpp index 5e3fc91898..2b5ec474d5 100644 --- a/timing/timeIncremental.cpp +++ b/timing/timeIncremental.cpp @@ -35,8 +35,8 @@ using namespace gtsam::symbol_shorthand; typedef Pose2 Pose; -typedef NoiseModelFactor1 NM1; -typedef NoiseModelFactor2 NM2; +typedef NoiseModelFactorN NM1; +typedef NoiseModelFactorN NM2; typedef BearingRangeFactor BR; //GTSAM_VALUE_EXPORT(Value); @@ -107,18 +107,18 @@ int main(int argc, char *argv[]) { boost::dynamic_pointer_cast >(measurementf)) { // Stop collecting measurements that are for future steps - if(measurement->key1() > step || measurement->key2() > step) + if(measurement->key<1>() > step || measurement->key<2>() > step) break; // Require that one of the nodes is the current one - if(measurement->key1() != step && measurement->key2() != step) + if(measurement->key<1>() != step && measurement->key<2>() != step) throw runtime_error("Problem in data file, out-of-sequence measurements"); // Add a new factor 
newFactors.push_back(measurement); // Initialize the new variable - if(measurement->key1() == step && measurement->key2() == step-1) { + if(measurement->key<1>() == step && measurement->key<2>() == step-1) { if(step == 1) newVariables.insert(step, measurement->measured().inverse()); else { @@ -126,7 +126,7 @@ int main(int argc, char *argv[]) { newVariables.insert(step, prevPose * measurement->measured().inverse()); } // cout << "Initializing " << step << endl; - } else if(measurement->key2() == step && measurement->key1() == step-1) { + } else if(measurement->key<2>() == step && measurement->key<1>() == step-1) { if(step == 1) newVariables.insert(step, measurement->measured()); else { diff --git a/timing/timeLago.cpp b/timing/timeLago.cpp index c3ee6ff4bc..bb506de36e 100644 --- a/timing/timeLago.cpp +++ b/timing/timeLago.cpp @@ -10,8 +10,8 @@ * -------------------------------------------------------------------------- */ /** - * @file timeVirtual.cpp - * @brief Time the overhead of using virtual destructors and methods + * @file timeLago.cpp + * @brief Time the LAGO initialization method * @author Richard Roberts * @date Dec 3, 2010 */ @@ -36,16 +36,15 @@ int main(int argc, char *argv[]) { Values::shared_ptr solution; NonlinearFactorGraph::shared_ptr g; string inputFile = findExampleDataFile("w10000"); - SharedDiagonal model = noiseModel::Diagonal::Sigmas((Vector(3) << 0.05, 0.05, 5.0 * M_PI / 180.0).finished()); + auto model = noiseModel::Diagonal::Sigmas((Vector(3) << 0.05, 0.05, 5.0 * M_PI / 180.0).finished()); boost::tie(g, solution) = load2D(inputFile, model); // add noise to create initial estimate Values initial; - Values::ConstFiltered poses = solution->filter(); - SharedDiagonal noise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.5, 0.5, 15.0 * M_PI / 180.0).finished()); + auto noise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.5, 0.5, 15.0 * M_PI / 180.0).finished()); Sampler sampler(noise); - for(const Values::ConstFiltered::KeyValuePair& it: poses) - 
initial.insert(it.key, it.value.retract(sampler.sample())); + for(const auto& [key,pose]: solution->extract()) + initial.insert(key, pose.retract(sampler.sample())); // Add prior on the pose having index (key) = 0 noiseModel::Diagonal::shared_ptr priorModel = // diff --git a/timing/timePose2.cpp b/timing/timePose2.cpp index 6af3e01637..bf04b9e937 100644 --- a/timing/timePose2.cpp +++ b/timing/timePose2.cpp @@ -97,7 +97,7 @@ Pose2 Pose2betweenOptimized(const Pose2& r1, const Pose2& r2, s, -c, dt2, 0.0, 0.0,-1.0; } - if (H2) *H2 = Matrix3::Identity(); + if (H2) *H2 = I_3x3; return Pose2(R,t); } diff --git a/timing/timeSchurFactors.cpp b/timing/timeSchurFactors.cpp index 9e057f8300..95296e80e5 100644 --- a/timing/timeSchurFactors.cpp +++ b/timing/timeSchurFactors.cpp @@ -14,12 +14,9 @@ #include #include -#include -#include #include using namespace std; -using namespace boost::assign; using namespace gtsam; #define SLOW @@ -104,7 +101,7 @@ void timeAll(size_t m, size_t N) { { // Raw memory Version FastVector < Key > keys; for (size_t i = 0; i < m; i++) - keys += i; + keys.push_back(i); Vector x = xvalues.vector(keys); double* xdata = x.data(); @@ -147,7 +144,7 @@ int main(void) { // ms += 20, 30, 40, 50; // ms += 20,30,40,50,60,70,80,90,100; for (size_t m = 2; m <= 50; m += 2) - ms += m; + ms.push_back(m); //for (size_t m=10;m<=100;m+=10) ms += m; // loop over number of images for(size_t m: ms) diff --git a/timing/timeiSAM2Chain.cpp b/timing/timeiSAM2Chain.cpp index a056bde246..dcb42f763e 100644 --- a/timing/timeiSAM2Chain.cpp +++ b/timing/timeiSAM2Chain.cpp @@ -36,8 +36,8 @@ using namespace gtsam::symbol_shorthand; typedef Pose2 Pose; -typedef NoiseModelFactor1 NM1; -typedef NoiseModelFactor2 NM2; +typedef NoiseModelFactorN NM1; +typedef NoiseModelFactorN NM2; noiseModel::Unit::shared_ptr model = noiseModel::Unit::Create(3); int main(int argc, char *argv[]) {