diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..8e747e4 --- /dev/null +++ b/.clang-format @@ -0,0 +1,28 @@ +--- +BasedOnStyle: Microsoft +AccessModifierOffset: '-1' +AlignAfterOpenBracket: Align +AllowShortFunctionsOnASingleLine: Inline +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: Inline +AlwaysBreakAfterReturnType: None +AlwaysBreakTemplateDeclarations: 'Yes' +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Custom +BreakConstructorInitializers: BeforeColon +BreakInheritanceList: BeforeColon +ColumnLimit: '80' +ContinuationIndentWidth: '4' +IncludeBlocks: Preserve +IndentPPDirectives: None +IndentWidth: '2' +Language: Cpp +MaxEmptyLinesToKeep: '1' +NamespaceIndentation: None +PointerAlignment: Left +SpaceBeforeParens: ControlStatements +SpacesBeforeTrailingComments: '2' +TabWidth: '4' +UseTab: Never + +... diff --git a/.github/workflows/clang-format-check.yml b/.github/workflows/clang-format-check.yml new file mode 100644 index 0000000..a69528f --- /dev/null +++ b/.github/workflows/clang-format-check.yml @@ -0,0 +1,22 @@ +name: clang-format Check +on: [push, pull_request] +jobs: + formatting-check: + name: Formatting Check + runs-on: ubuntu-latest + strategy: + matrix: + path: + - check: 'src' + exclude: 'third_party' # Exclude file paths containing "third_party" + - check: 'tests' + exclude: '' # Nothing to exclude + steps: + - uses: actions/checkout@v3 + - name: Run clang-format style check for C/C++/Protobuf programs. + uses: jidicula/clang-format-action@v4.11.0 + with: + clang-format-version: '13' + check-path: ${{ matrix.path['check'] }} + exclude-regex: ${{ matrix.path['exclude'] }} + fallback-style: 'Microsoft' # optional \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..12a9567 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,8 @@ +{ + "editor.formatOnSave": true, + "editor.formatOnType": true, + "clang-format.executable": "clang-format", + "clang-format.style": "file", + "clang-format.fallbackStyle": "Google", + "clang-format.language.cpp.enable": true, +} \ No newline at end of file diff --git a/src/examples/ego_motion_model_adapter/main.cpp b/src/examples/ego_motion_model_adapter/main.cpp index 02f76f5..ae2018e 100644 --- a/src/examples/ego_motion_model_adapter/main.cpp +++ b/src/examples/ego_motion_model_adapter/main.cpp @@ -12,165 +12,172 @@ #include #include -#include "types.h" -#include "motion_model/ego_motion_model.h" #include "kalman_filter/kalman_filter.h" +#include "motion_model/ego_motion_model.h" +#include "types.h" -static constexpr size_t DIM_X{ 5 }; /// \vec{x} = [x, y, vx, vy, yaw]^T -static constexpr size_t DIM_U{ 3 }; /// \vec{u} = [steeringAngle, ds, dyaw]^T -static constexpr size_t DIM_Z{ 2 }; +static constexpr size_t DIM_X{5}; /// \vec{x} = [x, y, vx, vy, yaw]^T +static constexpr size_t DIM_U{3}; /// \vec{u} = [steeringAngle, ds, dyaw]^T +static constexpr size_t DIM_Z{2}; using namespace kf; - -/// @brief This is an adapter example to show case how to convert from the 3-dimension state egomotion model -/// to a higher or lower dimension state vector (e.g. 5-dimension state vector and 3-dimension input vector). -class EgoMotionModelAdapter : public motionmodel::MotionModelExtInput +/// @brief This is an adapter example to showcase how to convert from the +/// 3-dimension state egomotion model to a higher or lower dimension state +/// vector (e.g.
5-dimension state vector and 3-dimension input vector). +class EgoMotionModelAdapter + : public motionmodel::MotionModelExtInput { -public: - virtual Vector f(Vector const & vecX, Vector const & vecU, Vector const & /*vecQ = Vector::Zero()*/) const override - { - Vector<3> tmpVecX; // \vec{x} = [x, y, yaw]^T - tmpVecX << vecX[0], vecX[1], vecX[4]; - - Vector<2> tmpVecU; // \vec{u} = [ds, dyaw]^T - tmpVecU << vecU[1], vecU[2]; - - motionmodel::EgoMotionModel const egoMotionModel; - tmpVecX = egoMotionModel.f(tmpVecX, tmpVecU); - - Vector vecXout; - vecXout[0] = tmpVecX[0]; - vecXout[1] = tmpVecX[1]; - vecXout[4] = tmpVecX[2]; - - return vecXout; - } - - virtual Matrix getProcessNoiseCov(Vector const & vecX, Vector const & vecU) const override - { - // input idx -> output index mapping - // 0 -> 0 - // 1 -> 1 - // 2 -> 4 - Vector<3> tmpVecX; - tmpVecX << vecX[0], vecX[1], vecX[4]; - - // input idx -> output index mapping - // 0 -> 1 - // 1 -> 2 - Vector<2> tmpVecU; - tmpVecU << vecU[1], vecU[2]; - - motionmodel::EgoMotionModel const egoMotionModel; - - Matrix<3, 3> matQ = egoMotionModel.getProcessNoiseCov(tmpVecX, tmpVecU); - - // |q00 q01 x x q02| - // |q10 q11 x x q12| |q00 q01 q02| - // Qout = | x x x x x| <- Q = |q10 q11 q12| - // | x x x x x| |q20 q21 q22| - // |q20 q21 x x q22| - - Matrix matQout; - matQout(0, 0) = matQ(0, 0); - matQout(0, 1) = matQ(0, 1); - matQout(0, 4) = matQ(0, 2); - matQout(1, 0) = matQ(1, 0); - matQout(1, 1) = matQ(1, 1); - matQout(1, 4) = matQ(1, 2); - matQout(4, 0) = matQ(2, 0); - matQout(4, 1) = matQ(2, 1); - matQout(4, 4) = matQ(2, 1); - - return matQout; - } - - virtual Matrix getInputNoiseCov(Vector const & vecX, Vector const & vecU) const override - { - Vector<3> tmpVecX; - tmpVecX << vecX[0], vecX[1], vecX[4]; - - Vector<2> tmpVecU; - tmpVecU << vecU[1], vecU[2]; - - motionmodel::EgoMotionModel const egoMotionModel; - - Matrix<3, 3> matU = egoMotionModel.getInputNoiseCov(tmpVecX, tmpVecU); - - Matrix matUout; - matUout(0, 0) = matU(0, 0); - matUout(0, 1) = matU(0, 1); - matUout(0, 4) = matU(0, 2); - matUout(1, 0) = matU(1, 0); - matUout(1, 1) = matU(1, 1); - matUout(1, 4) = matU(1, 2); - matUout(4, 0) = matU(2, 0); - matUout(4, 1) = matU(2, 1); - matUout(4, 4) = matU(2, 1); - - return matUout; - } - - virtual Matrix getJacobianFk(Vector const & vecX, Vector const & vecU) const override - { - Vector<3> tmpVecX; - tmpVecX << vecX[0], vecX[1], vecX[4]; - - Vector<2> tmpVecU; - tmpVecU << vecU[1], vecU[2]; - - motionmodel::EgoMotionModel const egoMotionModel; - - Matrix<3, 3> matFk = egoMotionModel.getJacobianFk(tmpVecX, tmpVecU); - - Matrix matFkout; - matFkout(0, 0) = matFk(0, 0); - matFkout(0, 1) = matFk(0, 1); - matFkout(0, 4) = matFk(0, 2); - matFkout(1, 0) = matFk(1, 0); - matFkout(1, 1) = matFk(1, 1); - matFkout(1, 4) = matFk(1, 2); - matFkout(4, 0) = matFk(2, 0); - matFkout(4, 1) = matFk(2, 1); - matFkout(4, 4) = matFk(2, 1); - - return matFkout; - } - - virtual Matrix getJacobianBk(Vector const & vecX, Vector const & vecU) const override - { - Vector<3> tmpVecX; - tmpVecX << vecX[0], vecX[1], vecX[4]; - - Vector<2> tmpVecU; - tmpVecU << vecU[1], vecU[2]; - - motionmodel::EgoMotionModel const egoMotionModel; - - Matrix<3, 2> matBk = egoMotionModel.getJacobianBk(tmpVecX, tmpVecU); - - Matrix matBkout; - matBkout(0, 1) = matBk(0, 0); - matBkout(0, 2) = matBk(0, 1); - matBkout(1, 1) = matBk(1, 0); - matBkout(1, 2) = matBk(1, 1); - matBkout(4, 1) = matBk(2, 0); - matBkout(4, 2) = matBk(2, 1); - - return matBkout; - } + public: + virtual Vector f( + Vector 
const& vecX, Vector const& vecU, + Vector const& /*vecQ = Vector::Zero()*/) const override + { + Vector<3> tmpVecX; // \vec{x} = [x, y, yaw]^T + tmpVecX << vecX[0], vecX[1], vecX[4]; + + Vector<2> tmpVecU; // \vec{u} = [ds, dyaw]^T + tmpVecU << vecU[1], vecU[2]; + + motionmodel::EgoMotionModel const egoMotionModel; + tmpVecX = egoMotionModel.f(tmpVecX, tmpVecU); + + Vector vecXout; + vecXout[0] = tmpVecX[0]; + vecXout[1] = tmpVecX[1]; + vecXout[4] = tmpVecX[2]; + + return vecXout; + } + + virtual Matrix getProcessNoiseCov( + Vector const& vecX, Vector const& vecU) const override + { + // input idx -> output index mapping + // 0 -> 0 + // 1 -> 1 + // 2 -> 4 + Vector<3> tmpVecX; + tmpVecX << vecX[0], vecX[1], vecX[4]; + + // input idx -> output index mapping + // 0 -> 1 + // 1 -> 2 + Vector<2> tmpVecU; + tmpVecU << vecU[1], vecU[2]; + + motionmodel::EgoMotionModel const egoMotionModel; + + Matrix<3, 3> matQ = egoMotionModel.getProcessNoiseCov(tmpVecX, tmpVecU); + + // |q00 q01 x x q02| + // |q10 q11 x x q12| |q00 q01 q02| + // Qout = | x x x x x| <- Q = |q10 q11 q12| + // | x x x x x| |q20 q21 q22| + // |q20 q21 x x q22| + + Matrix matQout; + matQout(0, 0) = matQ(0, 0); + matQout(0, 1) = matQ(0, 1); + matQout(0, 4) = matQ(0, 2); + matQout(1, 0) = matQ(1, 0); + matQout(1, 1) = matQ(1, 1); + matQout(1, 4) = matQ(1, 2); + matQout(4, 0) = matQ(2, 0); + matQout(4, 1) = matQ(2, 1); + matQout(4, 4) = matQ(2, 2); + + return matQout; + } + + virtual Matrix getInputNoiseCov( + Vector const& vecX, Vector const& vecU) const override + { + Vector<3> tmpVecX; + tmpVecX << vecX[0], vecX[1], vecX[4]; + + Vector<2> tmpVecU; + tmpVecU << vecU[1], vecU[2]; + + motionmodel::EgoMotionModel const egoMotionModel; + + Matrix<3, 3> matU = egoMotionModel.getInputNoiseCov(tmpVecX, tmpVecU); + + Matrix matUout; + matUout(0, 0) = matU(0, 0); + matUout(0, 1) = matU(0, 1); + matUout(0, 4) = matU(0, 2); + matUout(1, 0) = matU(1, 0); + matUout(1, 1) = matU(1, 1); + matUout(1, 4) = matU(1, 2); + matUout(4, 0) = matU(2, 0); + matUout(4, 1) = matU(2, 1); + matUout(4, 4) = matU(2, 2); + + return matUout; + } + + virtual Matrix getJacobianFk( + Vector const& vecX, Vector const& vecU) const override + { + Vector<3> tmpVecX; + tmpVecX << vecX[0], vecX[1], vecX[4]; + + Vector<2> tmpVecU; + tmpVecU << vecU[1], vecU[2]; + + motionmodel::EgoMotionModel const egoMotionModel; + + Matrix<3, 3> matFk = egoMotionModel.getJacobianFk(tmpVecX, tmpVecU); + + Matrix matFkout; + matFkout(0, 0) = matFk(0, 0); + matFkout(0, 1) = matFk(0, 1); + matFkout(0, 4) = matFk(0, 2); + matFkout(1, 0) = matFk(1, 0); + matFkout(1, 1) = matFk(1, 1); + matFkout(1, 4) = matFk(1, 2); + matFkout(4, 0) = matFk(2, 0); + matFkout(4, 1) = matFk(2, 1); + matFkout(4, 4) = matFk(2, 2); + + return matFkout; + } + + virtual Matrix getJacobianBk( + Vector const& vecX, Vector const& vecU) const override + { + Vector<3> tmpVecX; + tmpVecX << vecX[0], vecX[1], vecX[4]; + + Vector<2> tmpVecU; + tmpVecU << vecU[1], vecU[2]; + + motionmodel::EgoMotionModel const egoMotionModel; + + Matrix<3, 2> matBk = egoMotionModel.getJacobianBk(tmpVecX, tmpVecU); + + Matrix matBkout; + matBkout(0, 1) = matBk(0, 0); + matBkout(0, 2) = matBk(0, 1); + matBkout(1, 1) = matBk(1, 0); + matBkout(1, 2) = matBk(1, 1); + matBkout(4, 1) = matBk(2, 0); + matBkout(4, 2) = matBk(2, 1); + + return matBkout; + } }; int main() { - EgoMotionModelAdapter egoMotionModelAdapter; + EgoMotionModelAdapter egoMotionModelAdapter; - Vector vecU; - vecU << 1.0F, 2.0F, 0.01F; + Vector vecU; + vecU << 1.0F, 2.0F, 0.01F; -
KalmanFilter kf; - kf.predictEkf(egoMotionModelAdapter, vecU); + KalmanFilter kf; + kf.predictEkf(egoMotionModelAdapter, vecU); - return 0; + return 0; } diff --git a/src/examples/ekf_range_sensor/main.cpp b/src/examples/ekf_range_sensor/main.cpp index ddfcaa8..2964741 100644 --- a/src/examples/ekf_range_sensor/main.cpp +++ b/src/examples/ekf_range_sensor/main.cpp @@ -12,63 +12,63 @@ #include #include -#include "types.h" #include "kalman_filter/kalman_filter.h" #include "kalman_filter/unscented_transform.h" +#include "types.h" -static constexpr size_t DIM_X{ 2 }; -static constexpr size_t DIM_Z{ 2 }; +static constexpr size_t DIM_X{2}; +static constexpr size_t DIM_Z{2}; static kf::KalmanFilter kalmanfilter; -kf::Vector covertCartesian2Polar(const kf::Vector & cartesian); -kf::Matrix calculateJacobianMatrix(const kf::Vector & vecX); +kf::Vector covertCartesian2Polar(const kf::Vector& cartesian); +kf::Matrix calculateJacobianMatrix(const kf::Vector& vecX); void executeCorrectionStep(); int main() { - executeCorrectionStep(); + executeCorrectionStep(); - return 0; + return 0; } -kf::Vector covertCartesian2Polar(const kf::Vector & cartesian) +kf::Vector covertCartesian2Polar(const kf::Vector& cartesian) { - const kf::Vector polar{ - std::sqrt(cartesian[0] * cartesian[0] + cartesian[1] * cartesian[1]), - std::atan2(cartesian[1], cartesian[0]) - }; - return polar; + const kf::Vector polar{ + std::sqrt(cartesian[0] * cartesian[0] + cartesian[1] * cartesian[1]), + std::atan2(cartesian[1], cartesian[0])}; + return polar; } -kf::Matrix calculateJacobianMatrix(const kf::Vector & vecX) +kf::Matrix calculateJacobianMatrix(const kf::Vector& vecX) { - const kf::float32_t valX2PlusY2{ (vecX[0] * vecX[0]) + (vecX[1] * vecX[1]) }; - const kf::float32_t valSqrtX2PlusY2{ std::sqrt(valX2PlusY2) }; + const kf::float32_t valX2PlusY2{(vecX[0] * vecX[0]) + (vecX[1] * vecX[1])}; + const kf::float32_t valSqrtX2PlusY2{std::sqrt(valX2PlusY2)}; - kf::Matrix matHj; - matHj << - (vecX[0] / valSqrtX2PlusY2), (vecX[1] / valSqrtX2PlusY2), - (-vecX[1] / valX2PlusY2), (vecX[0] / valX2PlusY2); + kf::Matrix matHj; + matHj << (vecX[0] / valSqrtX2PlusY2), (vecX[1] / valSqrtX2PlusY2), + (-vecX[1] / valX2PlusY2), (vecX[0] / valX2PlusY2); - return matHj; + return matHj; } void executeCorrectionStep() { - kalmanfilter.vecX() << 10.0F, 5.0F; - kalmanfilter.matP() << 0.3F, 0.0F, 0.0F, 0.3F; + kalmanfilter.vecX() << 10.0F, 5.0F; + kalmanfilter.matP() << 0.3F, 0.0F, 0.0F, 0.3F; - const kf::Vector measPosCart{ 10.4F, 5.2F }; - const kf::Vector vecZ{ covertCartesian2Polar(measPosCart) }; + const kf::Vector measPosCart{10.4F, 5.2F}; + const kf::Vector vecZ{covertCartesian2Polar(measPosCart)}; - kf::Matrix matR; - matR << 0.1F, 0.0F, 0.0F, 0.0008F; + kf::Matrix matR; + matR << 0.1F, 0.0F, 0.0F, 0.0008F; - kf::Matrix matHj{ calculateJacobianMatrix(kalmanfilter.vecX()) }; // jacobian matrix Hj + kf::Matrix matHj{ + calculateJacobianMatrix(kalmanfilter.vecX())}; // jacobian matrix Hj - kalmanfilter.correctEkf(covertCartesian2Polar, vecZ, matR, matHj); + kalmanfilter.correctEkf(covertCartesian2Polar, vecZ, matR, matHj); - std::cout << "\ncorrected state vector = \n" << kalmanfilter.vecX() << "\n"; - std::cout << "\ncorrected state covariance = \n" << kalmanfilter.matP() << "\n"; + std::cout << "\ncorrected state vector = \n" << kalmanfilter.vecX() << "\n"; + std::cout << "\ncorrected state covariance = \n" + << kalmanfilter.matP() << "\n"; } diff --git a/src/examples/kf_state_estimation/main.cpp b/src/examples/kf_state_estimation/main.cpp index 
5cb363b..bffea3d 100644 --- a/src/examples/kf_state_estimation/main.cpp +++ b/src/examples/kf_state_estimation/main.cpp @@ -12,13 +12,13 @@ #include #include -#include "types.h" #include "kalman_filter/kalman_filter.h" +#include "types.h" -static constexpr size_t DIM_X{ 2 }; -static constexpr size_t DIM_Z{ 1 }; -static constexpr kf::float32_t T{ 1.0F }; -static constexpr kf::float32_t Q11{ 0.1F }, Q22{ 0.1F }; +static constexpr size_t DIM_X{2}; +static constexpr size_t DIM_Z{1}; +static constexpr kf::float32_t T{1.0F}; +static constexpr kf::float32_t Q11{0.1F}, Q22{0.1F}; static kf::KalmanFilter kalmanfilter; @@ -27,44 +27,46 @@ void executeCorrectionStep(); int main() { - executePredictionStep(); - executeCorrectionStep(); + executePredictionStep(); + executeCorrectionStep(); - return 0; + return 0; } void executePredictionStep() { - kalmanfilter.vecX() << 0.0F, 2.0F; - kalmanfilter.matP() << 0.1F, 0.0F, 0.0F, 0.1F; + kalmanfilter.vecX() << 0.0F, 2.0F; + kalmanfilter.matP() << 0.1F, 0.0F, 0.0F, 0.1F; - kf::Matrix F; // state transition matrix - F << 1.0F, T, 0.0F, 1.0F; + kf::Matrix F; // state transition matrix + F << 1.0F, T, 0.0F, 1.0F; - kf::Matrix Q; // process noise covariance - Q(0, 0) = (Q11 * T) + (Q22 * (std::pow(T, 3.0F) / 3.0F)); - Q(0, 1) = Q(1, 0) = Q22 * (std::pow(T, 2.0F) / 2.0F); - Q(1, 1) = Q22 * T; + kf::Matrix Q; // process noise covariance + Q(0, 0) = (Q11 * T) + (Q22 * (std::pow(T, 3.0F) / 3.0F)); + Q(0, 1) = Q(1, 0) = Q22 * (std::pow(T, 2.0F) / 2.0F); + Q(1, 1) = Q22 * T; - kalmanfilter.predictLKF(F, Q); // execute prediction step + kalmanfilter.predictLKF(F, Q); // execute prediction step - std::cout << "\npredicted state vector = \n" << kalmanfilter.vecX() << "\n"; - std::cout << "\npredicted state covariance = \n" << kalmanfilter.matP() << "\n"; + std::cout << "\npredicted state vector = \n" << kalmanfilter.vecX() << "\n"; + std::cout << "\npredicted state covariance = \n" + << kalmanfilter.matP() << "\n"; } void executeCorrectionStep() { - kf::Vector vecZ; - vecZ << 2.25F; + kf::Vector vecZ; + vecZ << 2.25F; - kf::Matrix matR; - matR << 0.01F; + kf::Matrix matR; + matR << 0.01F; - kf::Matrix matH; - matH << 1.0F, 0.0F; + kf::Matrix matH; + matH << 1.0F, 0.0F; - kalmanfilter.correctLKF(vecZ, matR, matH); + kalmanfilter.correctLKF(vecZ, matR, matH); - std::cout << "\ncorrected state vector = \n" << kalmanfilter.vecX() << "\n"; - std::cout << "\ncorrected state covariance = \n" << kalmanfilter.matP() << "\n"; + std::cout << "\ncorrected state vector = \n" << kalmanfilter.vecX() << "\n"; + std::cout << "\ncorrected state covariance = \n" + << kalmanfilter.matP() << "\n"; } diff --git a/src/examples/sr_ukf_linear_function/main.cpp b/src/examples/sr_ukf_linear_function/main.cpp index 606214e..a4790b5 100644 --- a/src/examples/sr_ukf_linear_function/main.cpp +++ b/src/examples/sr_ukf_linear_function/main.cpp @@ -12,86 +12,82 @@ #include #include -#include "types.h" #include "kalman_filter/square_root_ukf.h" +#include "types.h" -static constexpr size_t DIM_X{ 2 }; -static constexpr size_t DIM_Z{ 2 }; +static constexpr size_t DIM_X{2}; +static constexpr size_t DIM_Z{2}; void runExample1(); -kf::Vector funcF(const kf::Vector & x) +kf::Vector funcF(const kf::Vector& x) { - return x; + return x; } int main() { - // example 1 - runExample1(); + // example 1 + runExample1(); - return 0; + return 0; } void runExample1() { - std::cout << " Start of Example 1: ===========================" << std::endl; + std::cout << " Start of Example 1: ===========================" << 
std::endl; - // initializations - //x0 = np.array([1.0, 2.0]) - //P0 = np.array([[1.0, 0.5], [0.5, 1.0]]) - //Q = np.array([[0.5, 0.0], [0.0, 0.5]]) + // initializations + // x0 = np.array([1.0, 2.0]) + // P0 = np.array([[1.0, 0.5], [0.5, 1.0]]) + // Q = np.array([[0.5, 0.0], [0.0, 0.5]]) - //z = np.array([1.2, 1.8]) - //R = np.array([[0.3, 0.0], [0.0, 0.3]]) + // z = np.array([1.2, 1.8]) + // R = np.array([[0.3, 0.0], [0.0, 0.3]]) - kf::Vector x; - x << 1.0F, 2.0F; + kf::Vector x; + x << 1.0F, 2.0F; - kf::Matrix P; - P << 1.0F, 0.5F, - 0.5F, 1.0F; + kf::Matrix P; + P << 1.0F, 0.5F, 0.5F, 1.0F; - kf::Matrix Q; - Q << 0.5F, 0.0F, - 0.0F, 0.5F; + kf::Matrix Q; + Q << 0.5F, 0.0F, 0.0F, 0.5F; - kf::Vector z; - z << 1.2F, 1.8F; + kf::Vector z; + z << 1.2F, 1.8F; - kf::Matrix R; - R << 0.3F, 0.0F, - 0.0F, 0.3F; + kf::Matrix R; + R << 0.3F, 0.0F, 0.0F, 0.3F; - kf::SquareRootUKF srUkf; - srUkf.initialize(x, P, Q, R); + kf::SquareRootUKF srUkf; + srUkf.initialize(x, P, Q, R); - srUkf.predictSRUKF(funcF); + srUkf.predictSRUKF(funcF); - std::cout << "x = \n" << srUkf.vecX() << std::endl; - std::cout << "P = \n" << srUkf.matP() << std::endl; + std::cout << "x = \n" << srUkf.vecX() << std::endl; + std::cout << "P = \n" << srUkf.matP() << std::endl; - // Expectation from the python results: - // ===================================== - //x1 = - // [1. 2.] - //P1 = - // [[1.5 0.5] - // [0.5 1.5]] + // Expectation from the python results: + // ===================================== + // x1 = + // [1. 2.] + // P1 = + // [[1.5 0.5] + // [0.5 1.5]] - srUkf.correctSRUKF(funcF, z); + srUkf.correctSRUKF(funcF, z); - std::cout << "x = \n" << srUkf.vecX() << std::endl; - std::cout << "P = \n" << srUkf.matP() << std::endl; + std::cout << "x = \n" << srUkf.vecX() << std::endl; + std::cout << "P = \n" << srUkf.matP() << std::endl; - // Expectations from the python results: - // ====================================== - // x = - // [1.15385 1.84615] - // P = - // [[ 0.24582 0.01505 ] - // [ 0.01505 0.24582 ]] + // Expectations from the python results: + // ====================================== + // x = + // [1.15385 1.84615] + // P = + // [[ 0.24582 0.01505 ] + // [ 0.01505 0.24582 ]] - std::cout << " End of Example 1: ===========================" << std::endl; + std::cout << " End of Example 1: ===========================" << std::endl; } - diff --git a/src/examples/test_least_squares/main.cpp b/src/examples/test_least_squares/main.cpp index 87d7fd5..b76d939 100644 --- a/src/examples/test_least_squares/main.cpp +++ b/src/examples/test_least_squares/main.cpp @@ -22,91 +22,80 @@ void runExample4(); int main() { - runExample1(); - runExample2(); - runExample3(); - runExample4(); + runExample1(); + runExample2(); + runExample3(); + runExample4(); - return 0; + return 0; } void runExample1() { - std::cout << " Start of Example 1: ===========================" << std::endl; + std::cout << " Start of Example 1: ===========================" << std::endl; - kf::Matrix<3, 3> A; - A << 3.0, 2.0, 1.0, - 2.0, 3.0, 4.0, - 1.0, 4.0, 3.0; + kf::Matrix<3, 3> A; + A << 3.0, 2.0, 1.0, 2.0, 3.0, 4.0, 1.0, 4.0, 3.0; - kf::Matrix<2, 3> B; - B << 5.0, 6.0, 7.0, - 8.0, 9.0, 10.0; + kf::Matrix<2, 3> B; + B << 5.0, 6.0, 7.0, 8.0, 9.0, 10.0; - kf::util::JointRows<3, 2, 3> jmat(A, B); - auto AB = jmat.jointMatrix(); + kf::util::JointRows<3, 2, 3> jmat(A, B); + auto AB = jmat.jointMatrix(); - std::cout << "Joint Rows: AB = \n" << AB << std::endl; + std::cout << "Joint Rows: AB = \n" << AB << std::endl; - std::cout << " End of Example 1: 
===========================" << std::endl; + std::cout << " End of Example 1: ===========================" << std::endl; } void runExample2() { - std::cout << " Start of Example 2: ===========================" << std::endl; + std::cout << " Start of Example 2: ===========================" << std::endl; - kf::Matrix<3, 3> A; - A << 3.0, 2.0, 1.0, - 2.0, 3.0, 4.0, - 1.0, 4.0, 3.0; + kf::Matrix<3, 3> A; + A << 3.0, 2.0, 1.0, 2.0, 3.0, 4.0, 1.0, 4.0, 3.0; - kf::Matrix<3, 2> B; - B << 5.0, 6.0, - 7.0, 8.0, - 9.0, 10.0; + kf::Matrix<3, 2> B; + B << 5.0, 6.0, 7.0, 8.0, 9.0, 10.0; - kf::util::JointCols<3, 3, 2> jmat(A, B); - auto AB = jmat.jointMatrix(); + kf::util::JointCols<3, 3, 2> jmat(A, B); + auto AB = jmat.jointMatrix(); - std::cout << "Joint Columns: AB = \n" << AB << std::endl; + std::cout << "Joint Columns: AB = \n" << AB << std::endl; - std::cout << " End of Example 2: ===========================" << std::endl; + std::cout << " End of Example 2: ===========================" << std::endl; } void runExample3() { - std::cout << " Start of Example 2: ===========================" << std::endl; + std::cout << " Start of Example 3: ===========================" << std::endl; - kf::Matrix<3, 3> A; - A << 1.0, -2.0, 1.0, - 0.0, 1.0, 6.0, - 0.0, 0.0, 1.0; + kf::Matrix<3, 3> A; + A << 1.0, -2.0, 1.0, 0.0, 1.0, 6.0, 0.0, 0.0, 1.0; - kf::Matrix<3, 1> b; - b << 4.0, -1.0, 2.0; + kf::Matrix<3, 1> b; + b << 4.0, -1.0, 2.0; - auto x = kf::util::backwardSubstitute<3, 1>(A, b); + auto x = kf::util::backwardSubstitute<3, 1>(A, b); - std::cout << "Backward Substitution: x = \n" << x << std::endl; + std::cout << "Backward Substitution: x = \n" << x << std::endl; - std::cout << " End of Example 2: ===========================" << std::endl; + std::cout << " End of Example 3: ===========================" << std::endl; } void runExample4() { - std::cout << " Start of Example 2: ===========================" << std::endl; + std::cout << " Start of Example 4: ===========================" << std::endl; - kf::Matrix<3, 3> A; - A << 1.0, 0.0, 0.0, - -2.0, 1.0, 0.0, - 1.0, 6.0, 1.0; + kf::Matrix<3, 3> A; + A << 1.0, 0.0, 0.0, -2.0, 1.0, 0.0, 1.0, 6.0, 1.0; - kf::Matrix<3, 1> b; - b << 4.0, -1.0, 2.0; + kf::Matrix<3, 1> b; + b << 4.0, -1.0, 2.0; - auto x = kf::util::forwardSubstitute<3, 1>(A, b); + auto x = kf::util::forwardSubstitute<3, 1>(A, b); - std::cout << "Forward Substitution: x = \n" << x << std::endl; + std::cout << "Forward Substitution: x = \n" << x << std::endl; - std::cout << " End of Example 2: ===========================" << std::endl; + std::cout << " End of Example 4: ===========================" << std::endl; } diff --git a/src/examples/ukf_range_sensor/main.cpp b/src/examples/ukf_range_sensor/main.cpp index 2232ecc..2269ff2 100644 --- a/src/examples/ukf_range_sensor/main.cpp +++ b/src/examples/ukf_range_sensor/main.cpp @@ -12,109 +12,104 @@ #include #include -#include "types.h" #include "kalman_filter/unscented_kalman_filter.h" +#include "types.h" -static constexpr size_t DIM_X{ 4 }; -static constexpr size_t DIM_V{ 4 }; -static constexpr size_t DIM_Z{ 2 }; -static constexpr size_t DIM_N{ 2 }; +static constexpr size_t DIM_X{4}; +static constexpr size_t DIM_V{4}; +static constexpr size_t DIM_Z{2}; +static constexpr size_t DIM_N{2}; void runExample1(); -kf::Vector funcF(const kf::Vector & x, const kf::Vector & v) +kf::Vector funcF(const kf::Vector& x, const kf::Vector& v) { - kf::Vector y; - y[0] = x[0] + x[2] + v[0]; - y[1] = x[1] + x[3] + v[1]; - y[2] = x[2] + v[2]; - y[3] = x[3] + v[3]; - return y; +
kf::Vector y; + y[0] = x[0] + x[2] + v[0]; + y[1] = x[1] + x[3] + v[1]; + y[2] = x[2] + v[2]; + y[3] = x[3] + v[3]; + return y; } -kf::Vector funcH(const kf::Vector & x, const kf::Vector & n) +kf::Vector funcH(const kf::Vector& x, const kf::Vector& n) { - kf::Vector y; + kf::Vector y; - kf::float32_t px{ x[0] + n[0] }; - kf::float32_t py{ x[1] + n[1] }; + kf::float32_t px{x[0] + n[0]}; + kf::float32_t py{x[1] + n[1]}; - y[0] = std::sqrt((px * px) + (py * py)); - y[1] = std::atan(py / (px + std::numeric_limits::epsilon())); - return y; + y[0] = std::sqrt((px * px) + (py * py)); + y[1] = std::atan(py / (px + std::numeric_limits::epsilon())); + return y; } int main() { - // example 1 - runExample1(); + // example 1 + runExample1(); - return 0; + return 0; } void runExample1() { - std::cout << " Start of Example 1: ===========================" << std::endl; + std::cout << " Start of Example 1: ===========================" << std::endl; - kf::Vector x; - x << 2.0F, 1.0F, 0.0F, 0.0F; + kf::Vector x; + x << 2.0F, 1.0F, 0.0F, 0.0F; - kf::Matrix P; - P << 0.01F, 0.0F, 0.0F, 0.0F, - 0.0F, 0.01F, 0.0F, 0.0F, - 0.0F, 0.0F, 0.05F, 0.0F, - 0.0F, 0.0F, 0.0F, 0.05F; + kf::Matrix P; + P << 0.01F, 0.0F, 0.0F, 0.0F, 0.0F, 0.01F, 0.0F, 0.0F, 0.0F, 0.0F, 0.05F, + 0.0F, 0.0F, 0.0F, 0.0F, 0.05F; - kf::Matrix Q; - Q << 0.05F, 0.0F, 0.0F, 0.0F, - 0.0F, 0.05F, 0.0F, 0.0F, - 0.0F, 0.0F, 0.1F, 0.0F, - 0.0F, 0.0F, 0.0F, 0.1F; + kf::Matrix Q; + Q << 0.05F, 0.0F, 0.0F, 0.0F, 0.0F, 0.05F, 0.0F, 0.0F, 0.0F, 0.0F, 0.1F, 0.0F, + 0.0F, 0.0F, 0.0F, 0.1F; - kf::Matrix R; - R << 0.01F, 0.0F, 0.0F, 0.01F; + kf::Matrix R; + R << 0.01F, 0.0F, 0.0F, 0.01F; - kf::Vector z; - z << 2.5F, 0.05F; + kf::Vector z; + z << 2.5F, 0.05F; - kf::UnscentedKalmanFilter ukf; + kf::UnscentedKalmanFilter ukf; - ukf.vecX() = x; - ukf.matP() = P; + ukf.vecX() = x; + ukf.matP() = P; - ukf.setCovarianceQ(Q); - ukf.setCovarianceR(R); + ukf.setCovarianceQ(Q); + ukf.setCovarianceR(R); - ukf.predictUKF(funcF); + ukf.predictUKF(funcF); - std::cout << "x = \n" << ukf.vecX() << std::endl; - std::cout << "P = \n" << ukf.matP() << std::endl; + std::cout << "x = \n" << ukf.vecX() << std::endl; + std::cout << "P = \n" << ukf.matP() << std::endl; - // Expectation from the python results: - // ===================================== - // x = - // [2.0 1.0 0.0 0.0] - // P = - // [[0.11 0.00 0.05 0.00] - // [0.00 0.11 0.00 0.05] - // [0.05 0.00 0.15 0.00] - // [0.00 0.05 0.00 0.15]] + // Expectation from the python results: + // ===================================== + // x = + // [2.0 1.0 0.0 0.0] + // P = + // [[0.11 0.00 0.05 0.00] + // [0.00 0.11 0.00 0.05] + // [0.05 0.00 0.15 0.00] + // [0.00 0.05 0.00 0.15]] - ukf.correctUKF(funcH, z); + ukf.correctUKF(funcH, z); - std::cout << "x = \n" << ukf.vecX() << std::endl; - std::cout << "P = \n" << ukf.matP() << std::endl; + std::cout << "x = \n" << ukf.vecX() << std::endl; + std::cout << "P = \n" << ukf.matP() << std::endl; - // Expectations from the python results: - // ====================================== - // x = - // [ 2.554 0.356 0.252 -0.293] - // P = - // [[ 0.01 -0.001 0.005 -0. ] - // [-0.001 0.01 - 0. 0.005 ] - // [ 0.005 - 0. 0.129 - 0. ] - // [-0. 0.005 - 0. 0.129]] + // Expectations from the python results: + // ====================================== + // x = + // [ 2.554 0.356 0.252 -0.293] + // P = + // [[ 0.01 -0.001 0.005 -0. ] + // [-0.001 0.01 - 0. 0.005 ] + // [ 0.005 - 0. 0.129 - 0. ] + // [-0. 0.005 - 0. 
0.129]] - std::cout << " End of Example 1: ===========================" << std::endl; + std::cout << " End of Example 1: ===========================" << std::endl; } - diff --git a/src/examples/unscented_transform/main.cpp b/src/examples/unscented_transform/main.cpp index d0abcb3..64b6789 100644 --- a/src/examples/unscented_transform/main.cpp +++ b/src/examples/unscented_transform/main.cpp @@ -12,88 +12,88 @@ #include #include -#include "types.h" #include "kalman_filter/kalman_filter.h" #include "kalman_filter/unscented_transform.h" +#include "types.h" -static constexpr size_t DIM_1{ 1 }; -static constexpr size_t DIM_2{ 2 }; +static constexpr size_t DIM_1{1}; +static constexpr size_t DIM_2{2}; void runExample1(); void runExample2(); -kf::Vector function1(const kf::Vector & x) +kf::Vector function1(const kf::Vector& x) { - kf::Vector y; - y[0] = x[0] * x[0]; - return y; + kf::Vector y; + y[0] = x[0] * x[0]; + return y; } -kf::Vector function2(const kf::Vector & x) +kf::Vector function2(const kf::Vector& x) { - kf::Vector y; - y[0] = x[0] * x[0]; - y[1] = x[1] * x[1]; - return y; + kf::Vector y; + y[0] = x[0] * x[0]; + y[1] = x[1] * x[1]; + return y; } int main() { - // example 1 - runExample1(); + // example 1 + runExample1(); - // example 2 - runExample2(); + // example 2 + runExample2(); - return 0; + return 0; } void runExample1() { - std::cout << " Start of Example 1: ===========================" << std::endl; + std::cout << " Start of Example 1: ===========================" << std::endl; - kf::Vector x; - x << 0.0F; + kf::Vector x; + x << 0.0F; - kf::Matrix P; - P << 0.5F; + kf::Matrix P; + P << 0.5F; - kf::UnscentedTransform UT; - UT.compute(x, P, 0.0F); + kf::UnscentedTransform UT; + UT.compute(x, P, 0.0F); - kf::Vector vecY; - kf::Matrix matPyy; + kf::Vector vecY; + kf::Matrix matPyy; - UT.transform(function1, vecY, matPyy); + UT.transform(function1, vecY, matPyy); - UT.showSummary(); - std::cout << "vecY: \n" << vecY << "\n"; - std::cout << "matPyy: \n" << matPyy << "\n"; + UT.showSummary(); + std::cout << "vecY: \n" << vecY << "\n"; + std::cout << "matPyy: \n" << matPyy << "\n"; - std::cout << " End of Example 1: ===========================" << std::endl; + std::cout << " End of Example 1: ===========================" << std::endl; } void runExample2() { - std::cout << " Start of Example 2: ===========================" << std::endl; + std::cout << " Start of Example 2: ===========================" << std::endl; - kf::Vector x; - x << 2.0F, 1.0F; + kf::Vector x; + x << 2.0F, 1.0F; - kf::Matrix P; - P << 0.1F, 0.0F, 0.0F, 0.1F; + kf::Matrix P; + P << 0.1F, 0.0F, 0.0F, 0.1F; - kf::UnscentedTransform UT; - UT.compute(x, P, 0.0F); + kf::UnscentedTransform UT; + UT.compute(x, P, 0.0F); - kf::Vector vecY; - kf::Matrix matPyy; + kf::Vector vecY; + kf::Matrix matPyy; - UT.transform(function2, vecY, matPyy); + UT.transform(function2, vecY, matPyy); - UT.showSummary(); - std::cout << "vecY: \n" << vecY << "\n"; - std::cout << "matPyy: \n" << matPyy << "\n"; + UT.showSummary(); + std::cout << "vecY: \n" << vecY << "\n"; + std::cout << "matPyy: \n" << matPyy << "\n"; - std::cout << " End of Example 2: ===========================" << std::endl; + std::cout << " End of Example 2: ===========================" << std::endl; } diff --git a/src/openkf/kalman_filter/kalman_filter.h b/src/openkf/kalman_filter/kalman_filter.h index ac193ed..53e4f7f 100644 --- a/src/openkf/kalman_filter/kalman_filter.h +++ b/src/openkf/kalman_filter/kalman_filter.h @@ -12,120 +12,130 @@ #ifndef KALMAN_FILTER_LIB_H 
#define KALMAN_FILTER_LIB_H -#include "types.h" #include "motion_model/motion_model.h" +#include "types.h" namespace kf { - template - class KalmanFilter - { - public: - - KalmanFilter() - { - - } - - ~KalmanFilter() - { - - } - - virtual Vector & vecX() { return m_vecX; } - virtual const Vector & vecX() const { return m_vecX; } - - virtual Matrix & matP() { return m_matP; } - virtual const Matrix & matP() const { return m_matP; } - - /// - /// @brief predict state with a linear process model. - /// @param matF state transition matrix - /// @param matQ process noise covariance matrix - /// - void predictLKF(const Matrix & matF, const Matrix & matQ) - { - m_vecX = matF * m_vecX; - m_matP = matF * m_matP * matF.transpose() + matQ; - } - - /// - /// @brief correct state of with a linear measurement model. - /// @param matZ measurement vector - /// @param matR measurement noise covariance matrix - /// @param matH measurement transition matrix (measurement model) - /// - void correctLKF(const Vector & vecZ, const Matrix & matR, const Matrix & matH) - { - const Matrix matI{ Matrix::Identity() }; // Identity matrix - const Matrix matSk{ matH * m_matP * matH.transpose() + matR }; // Innovation covariance - const Matrix matKk{ m_matP * matH.transpose() * matSk.inverse() }; // Kalman Gain - - m_vecX = m_vecX + matKk * (vecZ - (matH * m_vecX)); - m_matP = (matI - matKk * matH) * m_matP; - } - - /// - /// @brief predict state with a linear process model. - /// @param predictionModel prediction model function callback - /// @param matJacobF state jacobian matrix - /// @param matQ process noise covariance matrix - /// - template - void predictEkf(PredictionModelCallback predictionModelFunc, const Matrix & matJacobF, const Matrix & matQ) - { - m_vecX = predictionModelFunc(m_vecX); - m_matP = matJacobF * m_matP * matJacobF.transpose() + matQ; - } - - /// - /// @brief predict state with a linear process model. - /// @param motionModel prediction motion model function - /// - void predictEkf(motionmodel::MotionModel const & motionModel) - { - Matrix const matFk{ motionModel.getJacobianFk(m_vecX) }; - Matrix const matQk{ motionModel.getProcessNoiseCov(m_vecX) }; - m_vecX = motionModel.f(m_vecX); - m_matP = matFk * m_matP * matFk.transpose() + matQk; - } - - /// - /// @brief predict state with a linear process model with external input. - /// @param motionModel prediction motion model function - /// @param vecU input vector - /// - template - void predictEkf(motionmodel::MotionModelExtInput const & motionModel, Vector const & vecU) - { - Matrix const matFk{ motionModel.getJacobianFk(m_vecX, vecU) }; - Matrix const matQk{ motionModel.getProcessNoiseCov(m_vecX, vecU) }; - m_vecX = motionModel.f(m_vecX, vecU); - m_matP = matFk * m_matP * matFk.transpose() + matQk; - } - - /// - /// @brief correct state of with a linear measurement model. 
- /// @param measurementModel measurement model function callback - /// @param matZ measurement vector - /// @param matR measurement noise covariance matrix - /// @param matJcobH measurement jacobian matrix - /// - template - void correctEkf(MeasurementModelCallback measurementModelFunc,const Vector & vecZ, const Matrix & matR, const Matrix & matJcobH) - { - const Matrix matI{ Matrix::Identity() }; // Identity matrix - const Matrix matSk{ matJcobH * m_matP * matJcobH.transpose() + matR }; // Innovation covariance - const Matrix matKk{ m_matP * matJcobH.transpose() * matSk.inverse() }; // Kalman Gain - - m_vecX = m_vecX + matKk * (vecZ - measurementModelFunc(m_vecX)); - m_matP = (matI - matKk * matJcobH) * m_matP; - } - - protected: - Vector m_vecX{ Vector::Zero() }; /// @brief estimated state vector - Matrix m_matP{ Matrix::Zero() }; /// @brief state covariance matrix - }; -} - -#endif // KALMAN_FILTER_LIB_H +template +class KalmanFilter +{ + public: + KalmanFilter() {} + + ~KalmanFilter() {} + + virtual Vector& vecX() { return m_vecX; } + virtual const Vector& vecX() const { return m_vecX; } + + virtual Matrix& matP() { return m_matP; } + virtual const Matrix& matP() const { return m_matP; } + + /// + /// @brief predict state with a linear process model. + /// @param matF state transition matrix + /// @param matQ process noise covariance matrix + /// + void predictLKF(const Matrix& matF, + const Matrix& matQ) + { + m_vecX = matF * m_vecX; + m_matP = matF * m_matP * matF.transpose() + matQ; + } + + /// + /// @brief correct state of with a linear measurement model. + /// @param matZ measurement vector + /// @param matR measurement noise covariance matrix + /// @param matH measurement transition matrix (measurement model) + /// + void correctLKF(const Vector& vecZ, const Matrix& matR, + const Matrix& matH) + { + const Matrix matI{ + Matrix::Identity()}; // Identity matrix + const Matrix matSk{matH * m_matP * matH.transpose() + + matR}; // Innovation covariance + const Matrix matKk{m_matP * matH.transpose() * + matSk.inverse()}; // Kalman Gain + + m_vecX = m_vecX + matKk * (vecZ - (matH * m_vecX)); + m_matP = (matI - matKk * matH) * m_matP; + } + + /// + /// @brief predict state with a linear process model. + /// @param predictionModel prediction model function callback + /// @param matJacobF state jacobian matrix + /// @param matQ process noise covariance matrix + /// + template + void predictEkf(PredictionModelCallback predictionModelFunc, + const Matrix& matJacobF, + const Matrix& matQ) + { + m_vecX = predictionModelFunc(m_vecX); + m_matP = matJacobF * m_matP * matJacobF.transpose() + matQ; + } + + /// + /// @brief predict state with a linear process model. + /// @param motionModel prediction motion model function + /// + void predictEkf(motionmodel::MotionModel const& motionModel) + { + Matrix const matFk{motionModel.getJacobianFk(m_vecX)}; + Matrix const matQk{motionModel.getProcessNoiseCov(m_vecX)}; + m_vecX = motionModel.f(m_vecX); + m_matP = matFk * m_matP * matFk.transpose() + matQk; + } + + /// + /// @brief predict state with a linear process model with external input. 
+ /// @param motionModel prediction motion model function + /// @param vecU input vector + /// + template + void predictEkf( + motionmodel::MotionModelExtInput const& motionModel, + Vector const& vecU) + { + Matrix const matFk{motionModel.getJacobianFk(m_vecX, vecU)}; + Matrix const matQk{ + motionModel.getProcessNoiseCov(m_vecX, vecU)}; + m_vecX = motionModel.f(m_vecX, vecU); + m_matP = matFk * m_matP * matFk.transpose() + matQk; + } + + /// + /// @brief correct state of with a linear measurement model. + /// @param measurementModel measurement model function callback + /// @param matZ measurement vector + /// @param matR measurement noise covariance matrix + /// @param matJcobH measurement jacobian matrix + /// + template + void correctEkf(MeasurementModelCallback measurementModelFunc, + const Vector& vecZ, const Matrix& matR, + const Matrix& matJcobH) + { + const Matrix matI{ + Matrix::Identity()}; // Identity matrix + const Matrix matSk{matJcobH * m_matP * matJcobH.transpose() + + matR}; // Innovation covariance + const Matrix matKk{m_matP * matJcobH.transpose() * + matSk.inverse()}; // Kalman Gain + + m_vecX = m_vecX + matKk * (vecZ - measurementModelFunc(m_vecX)); + m_matP = (matI - matKk * matJcobH) * m_matP; + } + + protected: + Vector m_vecX{ + Vector::Zero()}; /// @brief estimated state vector + Matrix m_matP{ + Matrix::Zero()}; /// @brief state covariance matrix +}; +} // namespace kf + +#endif // KALMAN_FILTER_LIB_H diff --git a/src/openkf/kalman_filter/square_root_ukf.h b/src/openkf/kalman_filter/square_root_ukf.h index 8fb4d4c..e7f8571 100644 --- a/src/openkf/kalman_filter/square_root_ukf.h +++ b/src/openkf/kalman_filter/square_root_ukf.h @@ -12,348 +12,389 @@ #ifndef SQUARE_ROOT_UNSCENTED_KALMAN_FILTER_LIB_H #define SQUARE_ROOT_UNSCENTED_KALMAN_FILTER_LIB_H -#include "util.h" #include "kalman_filter.h" +#include "util.h" namespace kf { - template - class SquareRootUKF : public KalmanFilter +template +class SquareRootUKF : public KalmanFilter +{ + public: + static constexpr int32_t SIGMA_DIM{2 * DIM_X + 1}; + + SquareRootUKF() : KalmanFilter() + { + // calculate weights + const float32_t kappa{static_cast(3 - DIM_X)}; + updateWeights(kappa); + } + + ~SquareRootUKF() {} + + Matrix& matP() override + { + return (m_matP = m_matSk * m_matSk.transpose()); + } + // const Matrix & matP() const override { return (m_matP = + // m_matSk * m_matSk.transpose()); } + + void initialize(const Vector& vecX, const Matrix& matP, + const Matrix& matQ, + const Matrix& matR) + { + m_vecX = vecX; + + { + // cholesky factorization to get matrix Pk square-root + Eigen::LLT> lltOfP(matP); + m_matSk = lltOfP.matrixL(); // sqrt(P) + } + + { + // cholesky factorization to get matrix Q square-root + Eigen::LLT> lltOfRv(matQ); + m_matRv = lltOfRv.matrixL(); // sqrt(Q) + } + + { + // cholesky factorization to get matrix R square-root + Eigen::LLT> lltOfRn(matR); + m_matRn = lltOfRn.matrixL(); // sqrt(R) + } + } + + /// + /// @brief setting the cholesky factorization of state covariance P. + /// @param matP state covariance matrix + /// + void setCovarianceP(const Matrix& matP) + { + // cholesky factorization to get matrix Q square-root + Eigen::LLT> lltOfPk(matP); + m_matSk = lltOfPk.matrixL(); + } + + /// + /// @brief setting the cholesky factorization of process noise covariance Q. 
+ /// @param matQ process noise covariance matrix + /// + void setCovarianceQ(const Matrix& matQ) + { + // cholesky factorization to get matrix Q square-root + Eigen::LLT> lltOfRv(matQ); + m_matRv = lltOfRv.matrixL(); + } + + /// + /// @brief setting the cholesky factorization of measurement noise covariance + /// R. + /// @param matR process noise covariance matrix + /// + void setCovarianceR(const Matrix& matR) + { + // cholesky factorization to get matrix R square-root + Eigen::LLT> lltOfRn(matR); + m_matRn = lltOfRn.matrixL(); // sqrt(R) + } + + /// + /// @brief state prediction step of the unscented Kalman filter (UKF). + /// @param predictionModelFunc callback to the prediction/process model + /// function + /// + template + void predictSRUKF(PredictionModelCallback predictionModelFunc) + { + const float32_t kappa{static_cast(3 - DIM_X)}; + + // x_sigmas = self.sigma_points(self.x, self.S) + Matrix matSigmaX{ + calculateSigmaPoints(m_vecX, m_matSk, kappa)}; + + // y_sigmas = np.zeros((self.dim_x, self.n_sigma)) + // for i in range(self.n_sigma): + // y_sigmas[:, i] = f(x_sigmas[:, i]) + for (int32_t i{0}; i < SIGMA_DIM; ++i) + { + const Vector Xi{util::getColumnAt(i, matSigmaX)}; + const Vector Yi{predictionModelFunc(Xi)}; // y = f(x) + + util::copyToColumn(i, matSigmaX, Yi); // Y[:, i] = y + } + + // calculate weighted mean of the predicted state + calculateWeightedMean(matSigmaX, m_vecX); + + // update of cholesky factorized state covariance Sk + m_matSk = updatePredictedCovariance(matSigmaX, m_vecX, m_matRv); + } + + /// + /// @brief measurement correction step of the unscented Kalman filter (UKF). + /// @param measurementModelFunc callback to the measurement model function + /// @param vecZ actual measurement vector. + /// + template + void correctSRUKF(MeasurementModelCallback measurementModelFunc, + const Vector& vecZ) + { + const float32_t kappa{static_cast(3 - DIM_X)}; + + // x_sigmas = self.sigma_points(self.x, self.S) + Matrix matSigmaX{ + calculateSigmaPoints(m_vecX, m_matSk, kappa)}; + + // y_sigmas = np.zeros((self.dim_x, self.n_sigma)) + // for i in range(self.n_sigma): + // y_sigmas[:, i] = f(x_sigmas[:, i]) + Matrix matSigmaY; + + for (int32_t i{0}; i < SIGMA_DIM; ++i) + { + const Vector Xi{util::getColumnAt(i, matSigmaX)}; + const Vector Yi{measurementModelFunc(Xi)}; // y = f(x) + + util::copyToColumn(i, matSigmaY, Yi); // Y[:, i] = y + } + + // calculate weighted mean of the predicted state + Vector vecY; + calculateWeightedMean(matSigmaY, vecY); + + // update of cholesky factorized state covariance Sk + const Matrix matSy{ + updatePredictedCovariance(matSigmaY, vecY, m_matRn)}; + + // cross-correlation + const Matrix matPxy{ + calculateCrossCorrelation(matSigmaX, m_vecX, matSigmaY, vecY)}; + + // Kalman Gain + Matrix matKk; + matKk = util::forwardSubstitute(matSy, matPxy); + matKk = util::backwardSubstitute(matSy.transpose(), matKk); + + // state vector correction + m_vecX += matKk * (vecZ - vecY); + + // state covariance correction + const Matrix matU{matKk * matSy}; + m_matSk = util::cholupdate(m_matSk, matU, -1.0); + } + + private: + using KalmanFilter::m_vecX; // from Base KalmanFilter class + using KalmanFilter::m_matP; // from Base KalmanFilter class + + float32_t m_weight0; /// @brief unscented transform weight 0 for mean + float32_t + m_weighti; /// @brief unscented transform weight i for none mean samples + + Matrix m_matSk{ + Matrix::Zero()}; /// @brief augmented state covariance (incl. 
+ /// process and measurement noise covariances) + Matrix m_matRv{ + Matrix::Zero()}; /// @brief augmented state covariance (incl. + /// process and measurement noise covariances) + Matrix m_matRn{ + Matrix::Zero()}; /// @brief augmented state covariance (incl. + /// process and measurement noise covariances) + + /// + /// @brief algorithm to calculate the weights used to draw the sigma points + /// @param kappa design scaling parameter for sigma points selection + /// + void updateWeights(float32_t kappa) + { + static_assert(DIM_X > 0, "DIM_X is Zero which leads to numerical issue."); + + const float32_t denoTerm{kappa + static_cast(DIM_X)}; + + m_weight0 = kappa / denoTerm; + m_weighti = 0.5F / denoTerm; + } + + /// + /// @brief algorithm to calculate the deterministic sigma points for + /// the unscented transformation + /// + /// @param vecXk mean of the normally distributed state + /// @param matSk covariance of the normally distributed state + /// @param kappa design scaling parameter for sigma points selection + /// + Matrix calculateSigmaPoints( + const Vector& vecXk, const Matrix& matSk, + const float32_t kappa) + { + const Matrix matS{ + matSk * std::sqrt(DIM_X + kappa)}; // sqrt(n + \kappa) * Sk + + Matrix sigmaX; + + // X_0 = \bar{xa} + util::copyToColumn(0, sigmaX, vecXk); + + for (int32_t i{0}; i < DIM_X; ++i) + { + const int32_t IDX_1{i + 1}; + const int32_t IDX_2{i + DIM_X + 1}; + + util::copyToColumn(IDX_1, sigmaX, vecXk); + util::copyToColumn(IDX_2, sigmaX, vecXk); + + const Vector vecShiftTerm{ + util::getColumnAt(i, matS)}; + + util::addColumnFrom( + IDX_1, sigmaX, + vecShiftTerm); // X_i = \bar{x} + sqrt(n + \kappa) * Sk + util::subColumnFrom( + IDX_2, sigmaX, + vecShiftTerm); // X_{i+n} = \bar{x} - sqrt(n + \kappa) * Sk + } + + return sigmaX; + } + + /// + /// @brief calculate the weighted mean and covariance given a set of sigma + /// points + /// @param sigmaX matrix of sigma points where each column contain single + /// sigma point + /// @param vecX output weighted mean + /// + template + void calculateWeightedMean(const Matrix& sigmaX, + Vector& vecX) + { + // 1. 
calculate mean: \bar{y} = \sum_{i_0}^{2n} W[0, i] Y[:, i] + vecX = m_weight0 * util::getColumnAt(0, sigmaX); + for (int32_t i{1}; i < SIGMA_DIM; ++i) + { + vecX += m_weighti * util::getColumnAt( + i, sigmaX); // y += W[0, i] Y[:, i] + } + } + + /// + /// @brief calculate the cross-correlation given two sets sigma points X and Y + /// and their means x and y + /// @param sigmaX first matrix of sigma points where each column contain + /// single sigma point + /// @param vecX mean of the first set of sigma points + /// @param sigmaY second matrix of sigma points where each column contain + /// single sigma point + /// @param vecY mean of the second set of sigma points + /// @return matPxy, the cross-correlation matrix + /// + Matrix calculateCrossCorrelation( + const Matrix& sigmaX, const Vector& vecX, + const Matrix& sigmaY, const Vector& vecY) + { + Vector devXi{util::getColumnAt(0, sigmaX) - + vecX}; // X[:, 0] - \bar{ x } + Vector devYi{util::getColumnAt(0, sigmaY) - + vecY}; // Y[:, 0] - \bar{ y } + + // P_0 = W[0, 0] (X[:, 0] - \bar{x}) (Y[:, 0] - \bar{y})^T + Matrix matPxy{m_weight0 * (devXi * devYi.transpose())}; + + for (int32_t i{1}; i < SIGMA_DIM; ++i) + { + devXi = util::getColumnAt(i, sigmaX) - + vecX; // X[:, i] - \bar{x} + devYi = util::getColumnAt(i, sigmaY) - + vecY; // Y[:, i] - \bar{y} + + matPxy += m_weighti * + (devXi * devYi.transpose()); // y += W[0, i] (Y[:, i] - + // \bar{y}) (Y[:, i] - \bar{y})^T + } + + return matPxy; + } + + template + Matrix updatePredictedCovariance( + const Matrix& matSigmaX, const Vector& meanX, + const Matrix& matU) + { + constexpr int32_t DIM_ROW{SIGMA_DIM + DIM - 1}; + constexpr int32_t DIM_COL{DIM}; + + // build compound matrix for square - root covariance update + // sigmas_X[:, 0] is not added because W0 could be zero which will lead + // to undefined outcome from sqrt(W0). 
+ Matrix matC{buildCompoundMatrix(matSigmaX, meanX, matU)}; + + // calculate square - root covariance S using QR decomposition of compound + // matrix C including the process noise covariance + // _, S_minus = np.linalg.qr(C) + Eigen::HouseholderQR> qr(matC); + + Matrix qrMatrixUpper{qr.matrixQR()}; + qrMatrixUpper = + util::getUpperTriangulerView(qrMatrixUpper); + + // get the upper triangular matrix from the factorization + // might need to reimplement QR factorization it as it seems to use dynamic + // memory allocation Matrix matR{ + // util::getBlock( + // qr.matrixQR().triangularView(), 0, 0) + // }; + + Matrix matR{ + util::getBlock(qrMatrixUpper, 0, 0)}; + + // Rank - 1 cholesky update + // x_dev = sigmas_X[:, 0] - x_minus + // x_dev = np.reshape(x_dev, [-1, 1]) + // S_minus = cholupdate(S_minus.T, x_dev, self.W0) + // TODO: implement cholupdate, the one from Eigen is not straight forward to + // use + Vector devX0 = util::getColumnAt(0, matSigmaX) - meanX; + matR = util::cholupdate(matR.transpose(), devX0, m_weight0); + + return matR; + } + + Matrix buildCompoundMatrix( + const Matrix& matSigmaX, const Vector& meanX, + const Matrix& matU) + { + // build compoint/joint matrix for square-root covariance update + + constexpr int32_t DIM_ROW{SIGMA_DIM + DIM_X - 1}; + constexpr int32_t DIM_COL{DIM_X}; + + Matrix matC; + + // C = (sigmas_X[:, 1 : ].T - x_minus) * np.sqrt(self.Wi) + const float32_t sqrtWi{std::sqrt(m_weighti)}; + for (int32_t i{0}; i < DIM_X; ++i) + { + for (int32_t j{1}; j < SIGMA_DIM; ++j) + { + matC(j - 1, i) = sqrtWi * (matSigmaX(i, j) - meanX[i]); + } + } + + // C = np.concatenate((C, self.sqrt_Q.T), axis=0) + for (int32_t i{0}; i < DIM_X; ++i) { - public: - static constexpr int32_t SIGMA_DIM{ 2 * DIM_X + 1 }; - - SquareRootUKF() - : KalmanFilter() - { - // calculate weights - const float32_t kappa{ static_cast(3 - DIM_X) }; - updateWeights(kappa); - } - - ~SquareRootUKF() {} - - Matrix & matP() override { return (m_matP = m_matSk * m_matSk.transpose()); } - //const Matrix & matP() const override { return (m_matP = m_matSk * m_matSk.transpose()); } - - void initialize(const Vector & vecX, const Matrix & matP, const Matrix & matQ, const Matrix & matR) - { - m_vecX = vecX; - - { - // cholesky factorization to get matrix Pk square-root - Eigen::LLT> lltOfP(matP); - m_matSk = lltOfP.matrixL(); // sqrt(P) - } - - { - // cholesky factorization to get matrix Q square-root - Eigen::LLT> lltOfRv(matQ); - m_matRv = lltOfRv.matrixL(); // sqrt(Q) - } - - { - // cholesky factorization to get matrix R square-root - Eigen::LLT> lltOfRn(matR); - m_matRn = lltOfRn.matrixL(); // sqrt(R) - } - } - - /// - /// @brief setting the cholesky factorization of state covariance P. - /// @param matP state covariance matrix - /// - void setCovarianceP(const Matrix & matP) - { - // cholesky factorization to get matrix Q square-root - Eigen::LLT> lltOfPk(matP); - m_matSk = lltOfPk.matrixL(); - } - - /// - /// @brief setting the cholesky factorization of process noise covariance Q. - /// @param matQ process noise covariance matrix - /// - void setCovarianceQ(const Matrix & matQ) - { - // cholesky factorization to get matrix Q square-root - Eigen::LLT> lltOfRv(matQ); - m_matRv = lltOfRv.matrixL(); - } - - /// - /// @brief setting the cholesky factorization of measurement noise covariance R. 
- /// @param matR process noise covariance matrix - /// - void setCovarianceR(const Matrix & matR) - { - // cholesky factorization to get matrix R square-root - Eigen::LLT> lltOfRn(matR); - m_matRn = lltOfRn.matrixL(); // sqrt(R) - } - - /// - /// @brief state prediction step of the unscented Kalman filter (UKF). - /// @param predictionModelFunc callback to the prediction/process model function - /// - template - void predictSRUKF(PredictionModelCallback predictionModelFunc) - { - const float32_t kappa{ static_cast(3 - DIM_X) }; - - // x_sigmas = self.sigma_points(self.x, self.S) - Matrix matSigmaX{ calculateSigmaPoints(m_vecX, m_matSk, kappa) }; - - // y_sigmas = np.zeros((self.dim_x, self.n_sigma)) - // for i in range(self.n_sigma): - // y_sigmas[:, i] = f(x_sigmas[:, i]) - for (int32_t i{ 0 }; i < SIGMA_DIM; ++i) - { - const Vector Xi{ util::getColumnAt(i, matSigmaX) }; - const Vector Yi{ predictionModelFunc(Xi) }; // y = f(x) - - util::copyToColumn(i, matSigmaX, Yi); // Y[:, i] = y - } - - // calculate weighted mean of the predicted state - calculateWeightedMean(matSigmaX, m_vecX); - - // update of cholesky factorized state covariance Sk - m_matSk = updatePredictedCovariance(matSigmaX, m_vecX, m_matRv); - } - - /// - /// @brief measurement correction step of the unscented Kalman filter (UKF). - /// @param measurementModelFunc callback to the measurement model function - /// @param vecZ actual measurement vector. - /// - template - void correctSRUKF(MeasurementModelCallback measurementModelFunc, const Vector & vecZ) - { - const float32_t kappa{ static_cast(3 - DIM_X) }; - - // x_sigmas = self.sigma_points(self.x, self.S) - Matrix matSigmaX{ calculateSigmaPoints(m_vecX, m_matSk, kappa) }; - - // y_sigmas = np.zeros((self.dim_x, self.n_sigma)) - // for i in range(self.n_sigma): - // y_sigmas[:, i] = f(x_sigmas[:, i]) - Matrix matSigmaY; - - for (int32_t i{ 0 }; i < SIGMA_DIM; ++i) - { - const Vector Xi{ util::getColumnAt(i, matSigmaX) }; - const Vector Yi{ measurementModelFunc(Xi) }; // y = f(x) - - util::copyToColumn(i, matSigmaY, Yi); // Y[:, i] = y - } - - // calculate weighted mean of the predicted state - Vector vecY; - calculateWeightedMean(matSigmaY, vecY); - - // update of cholesky factorized state covariance Sk - const Matrix matSy{ - updatePredictedCovariance(matSigmaY, vecY, m_matRn) - }; - - // cross-correlation - const Matrix matPxy{ - calculateCrossCorrelation(matSigmaX, m_vecX, matSigmaY, vecY) - }; - - // Kalman Gain - Matrix matKk; - matKk = util::forwardSubstitute(matSy, matPxy); - matKk = util::backwardSubstitute(matSy.transpose(), matKk); - - // state vector correction - m_vecX += matKk * (vecZ - vecY); - - // state covariance correction - const Matrix matU{ matKk * matSy }; - m_matSk = util::cholupdate(m_matSk, matU, -1.0); - } - - private: - using KalmanFilter::m_vecX; // from Base KalmanFilter class - using KalmanFilter::m_matP; // from Base KalmanFilter class - - float32_t m_weight0; /// @brief unscented transform weight 0 for mean - float32_t m_weighti; /// @brief unscented transform weight i for none mean samples - - Matrix m_matSk{ Matrix::Zero() }; /// @brief augmented state covariance (incl. process and measurement noise covariances) - Matrix m_matRv{ Matrix::Zero() }; /// @brief augmented state covariance (incl. process and measurement noise covariances) - Matrix m_matRn{ Matrix::Zero() }; /// @brief augmented state covariance (incl. 
process and measurement noise covariances) - - /// - /// @brief algorithm to calculate the weights used to draw the sigma points - /// @param kappa design scaling parameter for sigma points selection - /// - void updateWeights(float32_t kappa) - { - static_assert(DIM_X > 0, "DIM_X is Zero which leads to numerical issue."); - - const float32_t denoTerm{ kappa + static_cast(DIM_X) }; - - m_weight0 = kappa / denoTerm; - m_weighti = 0.5F / denoTerm; - } - - /// - /// @brief algorithm to calculate the deterministic sigma points for - /// the unscented transformation - /// - /// @param vecXk mean of the normally distributed state - /// @param matSk covariance of the normally distributed state - /// @param kappa design scaling parameter for sigma points selection - /// - Matrix calculateSigmaPoints(const Vector & vecXk, const Matrix & matSk, const float32_t kappa) - { - const Matrix matS{ matSk * std::sqrt(DIM_X + kappa) }; // sqrt(n + \kappa) * Sk - - Matrix sigmaX; - - // X_0 = \bar{xa} - util::copyToColumn< DIM_X, SIGMA_DIM >(0, sigmaX, vecXk); - - for (int32_t i{ 0 }; i < DIM_X; ++i) - { - const int32_t IDX_1{ i + 1 }; - const int32_t IDX_2{ i + DIM_X + 1 }; - - util::copyToColumn< DIM_X, SIGMA_DIM >(IDX_1, sigmaX, vecXk); - util::copyToColumn< DIM_X, SIGMA_DIM >(IDX_2, sigmaX, vecXk); - - const Vector vecShiftTerm{ util::getColumnAt< DIM_X, DIM_X >(i, matS) }; - - util::addColumnFrom< DIM_X, SIGMA_DIM >(IDX_1, sigmaX, vecShiftTerm); // X_i = \bar{x} + sqrt(n + \kappa) * Sk - util::subColumnFrom< DIM_X, SIGMA_DIM >(IDX_2, sigmaX, vecShiftTerm); // X_{i+n} = \bar{x} - sqrt(n + \kappa) * Sk - } - - return sigmaX; - } - - /// - /// @brief calculate the weighted mean and covariance given a set of sigma points - /// @param sigmaX matrix of sigma points where each column contain single sigma point - /// @param vecX output weighted mean - /// - template - void calculateWeightedMean(const Matrix & sigmaX, Vector & vecX) - { - // 1. 
calculate mean: \bar{y} = \sum_{i_0}^{2n} W[0, i] Y[:, i] - vecX = m_weight0 * util::getColumnAt(0, sigmaX); - for (int32_t i{ 1 }; i < SIGMA_DIM; ++i) - { - vecX += m_weighti * util::getColumnAt(i, sigmaX); // y += W[0, i] Y[:, i] - } - } - - /// - /// @brief calculate the cross-correlation given two sets sigma points X and Y and their means x and y - /// @param sigmaX first matrix of sigma points where each column contain single sigma point - /// @param vecX mean of the first set of sigma points - /// @param sigmaY second matrix of sigma points where each column contain single sigma point - /// @param vecY mean of the second set of sigma points - /// @return matPxy, the cross-correlation matrix - /// - Matrix calculateCrossCorrelation( - const Matrix & sigmaX, const Vector & vecX, - const Matrix & sigmaY, const Vector & vecY) - { - Vector devXi{ util::getColumnAt(0, sigmaX) - vecX }; // X[:, 0] - \bar{ x } - Vector devYi{ util::getColumnAt(0, sigmaY) - vecY }; // Y[:, 0] - \bar{ y } - - // P_0 = W[0, 0] (X[:, 0] - \bar{x}) (Y[:, 0] - \bar{y})^T - Matrix matPxy{ - m_weight0 * (devXi * devYi.transpose()) - }; - - for (int32_t i{ 1 }; i < SIGMA_DIM; ++i) - { - devXi = util::getColumnAt(i, sigmaX) - vecX; // X[:, i] - \bar{x} - devYi = util::getColumnAt(i, sigmaY) - vecY; // Y[:, i] - \bar{y} - - matPxy += m_weighti * (devXi * devYi.transpose()); // y += W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - } - - return matPxy; - } - - template - Matrix updatePredictedCovariance( - const Matrix & matSigmaX, const Vector & meanX, const Matrix & matU) - { - constexpr int32_t DIM_ROW{ SIGMA_DIM + DIM - 1 }; - constexpr int32_t DIM_COL{ DIM }; - - // build compound matrix for square - root covariance update - // sigmas_X[:, 0] is not added because W0 could be zero which will lead - // to undefined outcome from sqrt(W0). 
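// For intuition, a minimal standalone sketch (assuming Eigen; the dimensions
// and the helper name predictedSqrtCov are illustrative, not part of this
// library) of the step below: stack the sqrt(Wi)-scaled sigma-point
// deviations on top of the transposed noise square-root, QR-decompose the
// compound matrix, and keep the upper-triangular factor as the new
// square-root covariance. The rank-1 cholupdate with the W0 term follows it.
#include <Eigen/Dense>

Eigen::MatrixXf predictedSqrtCov(const Eigen::MatrixXf& scaledDeviations,  // (n_sigma - 1) x dim
                                 const Eigen::MatrixXf& sqrtNoiseT)        // dim x dim, sqrt(Q)^T
{
    Eigen::MatrixXf C(scaledDeviations.rows() + sqrtNoiseT.rows(), scaledDeviations.cols());
    C << scaledDeviations, sqrtNoiseT;  // compound matrix

    Eigen::HouseholderQR<Eigen::MatrixXf> qr(C);  // C = Q_r * R
    Eigen::MatrixXf R = qr.matrixQR().topRows(C.cols());
    R.triangularView<Eigen::StrictlyLower>().setZero();  // keep only the upper-triangular factor
    return R;  // R^T * R == C^T * C, so R is a square-root of the summed covariance
}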
- Matrix matC{ buildCompoundMatrix(matSigmaX, meanX, matU) }; - - // calculate square - root covariance S using QR decomposition of compound matrix C - // including the process noise covariance - // _, S_minus = np.linalg.qr(C) - Eigen::HouseholderQR< Matrix > qr(matC); - - Matrix qrMatrixUpper{ qr.matrixQR() }; - qrMatrixUpper = util::getUpperTriangulerView(qrMatrixUpper); - - // get the upper triangular matrix from the factorization - // might need to reimplement QR factorization it as it seems to use dynamic memory allocation - // Matrix matR{ - // util::getBlock( - // qr.matrixQR().triangularView(), 0, 0) - // }; - - Matrix matR{ util::getBlock(qrMatrixUpper, 0, 0) }; - - // Rank - 1 cholesky update - // x_dev = sigmas_X[:, 0] - x_minus - // x_dev = np.reshape(x_dev, [-1, 1]) - // S_minus = cholupdate(S_minus.T, x_dev, self.W0) - // TODO: implement cholupdate, the one from Eigen is not straight forward to use - Vector devX0 = util::getColumnAt(0, matSigmaX) - meanX; - matR = util::cholupdate(matR.transpose(), devX0, m_weight0); - - return matR; - } - - Matrix - buildCompoundMatrix( - const Matrix & matSigmaX, const Vector & meanX, const Matrix & matU) - { - // build compoint/joint matrix for square-root covariance update - - constexpr int32_t DIM_ROW{ SIGMA_DIM + DIM_X - 1 }; - constexpr int32_t DIM_COL{ DIM_X }; - - Matrix matC; - - // C = (sigmas_X[:, 1 : ].T - x_minus) * np.sqrt(self.Wi) - const float32_t sqrtWi{ std::sqrt(m_weighti) }; - for (int32_t i{ 0 }; i < DIM_X; ++i) - { - for (int32_t j{ 1 }; j < SIGMA_DIM; ++j) - { - matC(j - 1, i) = sqrtWi * (matSigmaX(i, j) - meanX[i]); - } - } - - // C = np.concatenate((C, self.sqrt_Q.T), axis=0) - for (int32_t i{ 0 }; i < DIM_X; ++i) - { - for (int32_t j{ 0 }; j < DIM_X; ++j) - { - matC(j + SIGMA_DIM - 1, i) = matU(i, j); - } - } - - return matC; - } - }; -} - -#endif // UNSCENTED_KALMAN_FILTER_LIB_H + for (int32_t j{0}; j < DIM_X; ++j) + { + matC(j + SIGMA_DIM - 1, i) = matU(i, j); + } + } + + return matC; + } +}; +} // namespace kf + +#endif // UNSCENTED_KALMAN_FILTER_LIB_H diff --git a/src/openkf/kalman_filter/unscented_kalman_filter.h b/src/openkf/kalman_filter/unscented_kalman_filter.h index 24fb81c..afcfeb1 100644 --- a/src/openkf/kalman_filter/unscented_kalman_filter.h +++ b/src/openkf/kalman_filter/unscented_kalman_filter.h @@ -12,323 +12,371 @@ #ifndef UNSCENTED_KALMAN_FILTER_LIB_H #define UNSCENTED_KALMAN_FILTER_LIB_H -#include "util.h" #include "kalman_filter.h" +#include "util.h" namespace kf { - template - class UnscentedKalmanFilter : public KalmanFilter +template +class UnscentedKalmanFilter : public KalmanFilter +{ + public: + static constexpr int32_t DIM_A{DIM_X + DIM_V + DIM_N}; + static constexpr int32_t SIGMA_DIM{2 * DIM_A + 1}; + + UnscentedKalmanFilter() : KalmanFilter() + { + // 1. calculate weights + const float32_t kappa{static_cast(3 - DIM_A)}; + updateWeights(kappa); + } + + ~UnscentedKalmanFilter() {} + + /// + /// @brief adding process noise covariance Q to the augmented state covariance + /// matPa in the middle element of the diagonal. + /// + void setCovarianceQ(const Matrix& matQ) + { + const int32_t S_IDX{DIM_X}; + const int32_t L_IDX{S_IDX + DIM_V}; + + for (int32_t i{S_IDX}; i < L_IDX; ++i) + { + for (int32_t j{S_IDX}; j < L_IDX; ++j) + { + m_matPa(i, j) = matQ(i - S_IDX, j - S_IDX); + } + } + } + + /// + /// @brief adding measurement noise covariance R to the augmented state + /// covariance matPa in the third element of the diagonal. 
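// For intuition, a minimal Eigen sketch (dimensions and variances purely
// illustrative: 3 states, 2 process-noise terms, 1 measurement-noise term)
// of the block-diagonal layout that setCovarianceQ above and setCovarianceR
// below write into the augmented covariance m_matPa:
#include <Eigen/Dense>

int main()
{
    constexpr int DIM_X = 3, DIM_V = 2, DIM_N = 1;
    constexpr int DIM_A = DIM_X + DIM_V + DIM_N;

    Eigen::Matrix<float, DIM_A, DIM_A> Pa = Eigen::Matrix<float, DIM_A, DIM_A>::Zero();
    Pa.block<DIM_X, DIM_X>(0, 0) =
        Eigen::Matrix<float, DIM_X, DIM_X>::Identity() * 0.5F;   // state covariance P (top-left)
    Pa.block<DIM_V, DIM_V>(DIM_X, DIM_X) =
        Eigen::Matrix<float, DIM_V, DIM_V>::Identity() * 0.1F;   // process noise Q (middle block)
    Pa.block<DIM_N, DIM_N>(DIM_X + DIM_V, DIM_X + DIM_V) =
        Eigen::Matrix<float, DIM_N, DIM_N>::Identity() * 0.01F;  // measurement noise R (bottom-right)
    return 0;
}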
+ /// + void setCovarianceR(const Matrix& matR) + { + const int32_t S_IDX{DIM_X + DIM_V}; + const int32_t L_IDX{S_IDX + DIM_N}; + + for (int32_t i{S_IDX}; i < L_IDX; ++i) + { + for (int32_t j{S_IDX}; j < L_IDX; ++j) + { + m_matPa(i, j) = matR(i - S_IDX, j - S_IDX); + } + } + } + + /// + /// @brief adding vecX and matP to the augmented state vector and covariance + /// vecXa and matPa + /// + void updateAugmentedStateAndCovariance() + { + updateAugmentedVecX(); + updateAugmentedMatP(); + } + + /// + /// @brief state prediction step of the unscented Kalman filter (UKF). + /// @param predictionModelFunc callback to the prediction/process model + /// function + /// + template + void predictUKF(PredictionModelCallback predictionModelFunc) + { + // self.x_a[:self.dim_x] = x + // self.P_a[:self.dim_x, : self.dim_x] = P + updateAugmentedStateAndCovariance(); + + const float32_t kappa{static_cast(3 - DIM_A)}; + + // xa_sigmas = self.sigma_points(self.x_a, self.P_a) + Matrix matSigmaXa{ + calculateSigmaPoints(m_vecXa, m_matPa, kappa)}; + + // xx_sigmas = xa_sigmas[:self.dim_x, :] + // Matrix sigmaXx{ matSigmaXa.block(0, + // 0) }; + Matrix sigmaXx{matSigmaXa.block(0, 0, DIM_X, SIGMA_DIM)}; + + // xv_sigmas = xa_sigmas[self.dim_x : self.dim_x + self.dim_v, :] + Matrix sigmaXv{ + matSigmaXa.block(DIM_X, 0, DIM_V, SIGMA_DIM)}; + + // y_sigmas = np.zeros((self.dim_x, self.n_sigma)) + // for i in range(self.n_sigma): + // y_sigmas[:, i] = f(xx_sigmas[:, i], xv_sigmas[:, i]) + for (int32_t i{0}; i < SIGMA_DIM; ++i) + { + const Vector sigmaXxi{ + util::getColumnAt(i, sigmaXx)}; + const Vector sigmaXvi{ + util::getColumnAt(i, sigmaXv)}; + + const Vector Yi{ + predictionModelFunc(sigmaXxi, sigmaXvi)}; // y = f(x) + + util::copyToColumn(i, sigmaXx, Yi); // Y[:, i] = y + } + + // y, Pyy = self.calculate_mean_and_covariance(y_sigmas) + calculateWeightedMeanAndCovariance(sigmaXx, m_vecX, m_matP); + + //// self.x_a[:self.dim_x] = y + //// self.P_a[:self.dim_x, : self.dim_x] = Pyy + // updateAugmentedStateAndCovariance(); + } + + /// + /// @brief measurement correction step of the unscented Kalman filter (UKF). + /// @param measurementModelFunc callback to the measurement model function + /// @param vecZ actual measurement vector. 
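// For reference, a compact standalone sketch (2-D state, scalar measurement,
// numbers purely illustrative) of the correction equations applied at the end
// of the measurement-correction step documented above:
// K = Pxy * Pyy^{-1},  x <- x + K * (z - y),  P <- P - K * Pyy * K^T.
#include <Eigen/Dense>

int main()
{
    Eigen::Vector2f x(1.0F, 0.5F);                    // predicted state mean
    Eigen::Matrix2f P; P << 0.2F, 0.0F, 0.0F, 0.2F;   // predicted state covariance

    const float pyy = 0.3F;                           // predicted measurement variance
    const Eigen::Vector2f pxy(0.1F, 0.05F);           // state-measurement cross covariance
    const float z = 1.2F;                             // actual measurement
    const float y = 1.0F;                             // predicted measurement

    const Eigen::Vector2f K = pxy / pyy;              // Kalman gain (scalar-measurement case)
    x += K * (z - y);                                 // state correction
    P -= pyy * K * K.transpose();                     // covariance correction
    return 0;
}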
+ /// + template + void correctUKF(MeasurementModelCallback measurementModelFunc, + const Vector& vecZ) + { + // self.x_a[:self.dim_x] = x + // self.P_a[:self.dim_x, : self.dim_x] = P + updateAugmentedStateAndCovariance(); + + const float32_t kappa{static_cast(3 - DIM_A)}; + + // xa_sigmas = self.sigma_points(self.x_a, self.P_a) + Matrix matSigmaXa{ + calculateSigmaPoints(m_vecXa, m_matPa, kappa)}; + + // xx_sigmas = xa_sigmas[:self.dim_x, :] + Matrix sigmaXx{matSigmaXa.block(0, 0, DIM_X, SIGMA_DIM)}; + + // xn_sigmas = xa_sigmas[self.dim_x + self.dim_v :, :] + Matrix sigmaXn{ + matSigmaXa.block(DIM_X + DIM_V, 0, DIM_N, SIGMA_DIM)}; + + // y_sigmas = np.zeros((self.dim_z, self.n_sigma)) + // for i in range(self.n_sigma) : + // y_sigmas[:, i] = h(xx_sigmas[:, i], xn_sigmas[:, i]) + Matrix sigmaY; + for (int32_t i{0}; i < SIGMA_DIM; ++i) + { + const Vector sigmaXxi{ + util::getColumnAt(i, sigmaXx)}; + const Vector sigmaXni{ + util::getColumnAt(i, sigmaXn)}; + + const Vector Yi{ + measurementModelFunc(sigmaXxi, sigmaXni)}; // y = f(x) + + util::copyToColumn(i, sigmaY, Yi); // Y[:, i] = y + } + + // y, Pyy = self.calculate_mean_and_covariance(y_sigmas) + Vector vecY; + Matrix matPyy; + calculateWeightedMeanAndCovariance(sigmaY, vecY, matPyy); + + // TODO: calculate cross correlation + const Matrix matPxy{ + calculateCrossCorrelation(sigmaXx, m_vecX, sigmaY, vecY)}; + + // kalman gain + const Matrix matK{matPxy * matPyy.inverse()}; + + m_vecX += matK * (vecZ - vecY); + m_matP -= matK * matPyy * matK.transpose(); + + //// self.x_a[:self.dim_x] = x + //// self.P_a[:self.dim_x, : self.dim_x] = P + // updateAugmentedStateAndCovariance(); + } + + private: + using KalmanFilter::m_vecX; // from Base KalmanFilter class + using KalmanFilter::m_matP; // from Base KalmanFilter class + + float32_t m_weight0; /// @brief unscented transform weight 0 for mean + float32_t + m_weighti; /// @brief unscented transform weight i for none mean samples + + Vector m_vecXa{ + Vector::Zero()}; /// @brief augmented state vector (incl. process + /// and measurement noise means) + Matrix m_matPa{ + Matrix::Zero()}; /// @brief augmented state covariance (incl. 
+ /// process and measurement noise covariances) + + /// + /// @brief add state vector m_vecX to the augment state vector m_vecXa + /// + void updateAugmentedVecX() + { + for (int32_t i{0}; i < DIM_X; ++i) + { + m_vecXa[i] = m_vecX[i]; + } + } + + /// + /// @brief add covariance matrix m_matP to the augment covariance m_matPa + /// + void updateAugmentedMatP() + { + for (int32_t i{0}; i < DIM_X; ++i) + { + for (int32_t j{0}; j < DIM_X; ++j) + { + m_matPa(i, j) = m_matP(i, j); + } + } + } + + /// + /// @brief algorithm to calculate the weights used to draw the sigma points + /// @param kappa design scaling parameter for sigma points selection + /// + void updateWeights(float32_t kappa) + { + static_assert(DIM_A > 0, "DIM_A is Zero which leads to numerical issue."); + + const float32_t denoTerm{kappa + static_cast(DIM_A)}; + + m_weight0 = kappa / denoTerm; + m_weighti = 0.5F / denoTerm; + } + + /// + /// @brief algorithm to calculate the deterministic sigma points for + /// the unscented transformation + /// + /// @param vecX mean of the normally distributed state + /// @param matPxx covariance of the normally distributed state + /// @param kappa design scaling parameter for sigma points selection + /// + Matrix calculateSigmaPoints( + const Vector& vecXa, const Matrix& matPa, + const float32_t kappa) + { + const float32_t scalarMultiplier{ + std::sqrt(DIM_A + kappa)}; // sqrt(n + \kappa) + + // cholesky factorization to get matrix Pxx square-root + Eigen::LLT> lltOfPa(matPa); + Matrix matSa{lltOfPa.matrixL()}; // sqrt(P_{a}) + + matSa *= scalarMultiplier; // sqrt( (n + \kappa) * P_{a} ) + + Matrix sigmaXa; + + // X_0 = \bar{xa} + util::copyToColumn(0, sigmaXa, vecXa); + + for (int32_t i{0}; i < DIM_A; ++i) + { + const int32_t IDX_1{i + 1}; + const int32_t IDX_2{i + DIM_A + 1}; + + util::copyToColumn(IDX_1, sigmaXa, vecXa); + util::copyToColumn(IDX_2, sigmaXa, vecXa); + + const Vector vecShiftTerm{ + util::getColumnAt(i, matSa)}; + + util::addColumnFrom( + IDX_1, sigmaXa, vecShiftTerm); // X_i^a = \bar{xa} + sqrt( (n^a + + // \kappa) * P^{a} ) + util::subColumnFrom( + IDX_2, sigmaXa, vecShiftTerm); // X_{i+n}^a = \bar{xa} - sqrt( (n^a + + // \kappa) * P^{a} ) + } + + return sigmaXa; + } + + /// + /// @brief calculate the weighted mean and covariance given a set of sigma + /// points + /// @param sigmaX matrix of sigma points where each column contain single + /// sigma point + /// @param vecX output weighted mean + /// @param matP output weighted covariance + /// + template + void calculateWeightedMeanAndCovariance( + const Matrix& sigmaX, Vector& vecX, + Matrix& matPxx) + { + // 1. calculate mean: \bar{y} = \sum_{i_0}^{2n} W[0, i] Y[:, i] + vecX = m_weight0 * util::getColumnAt(0, sigmaX); + for (int32_t i{1}; i < SIGMA_DIM; ++i) + { + vecX += m_weighti * util::getColumnAt( + i, sigmaX); // y += W[0, i] Y[:, i] + } + + // 2. 
calculate covariance: P_{yy} = \sum_{i_0}^{2n} W[0, i] (Y[:, i] - + // \bar{y}) (Y[:, i] - \bar{y})^T + Vector devXi{util::getColumnAt(0, sigmaX) - + vecX}; // Y[:, 0] - \bar{ y } + matPxx = m_weight0 * devXi * + devXi.transpose(); // P_0 = W[0, 0] (Y[:, 0] - \bar{y}) (Y[:, 0] - + // \bar{y})^T + + for (int32_t i{1}; i < SIGMA_DIM; ++i) + { + devXi = util::getColumnAt(i, sigmaX) - + vecX; // Y[:, i] - \bar{y} + + const Matrix Pi{ + m_weighti * devXi * + devXi.transpose()}; // P_i = W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - + // \bar{y})^T + + matPxx += Pi; // y += W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T + } + } + + /// + /// @brief calculate the cross-correlation given two sets sigma points X and Y + /// and their means x and y + /// @param sigmaX first matrix of sigma points where each column contain + /// single sigma point + /// @param vecX mean of the first set of sigma points + /// @param sigmaY second matrix of sigma points where each column contain + /// single sigma point + /// @param vecY mean of the second set of sigma points + /// @return matPxy, the cross-correlation matrix + /// + Matrix calculateCrossCorrelation( + const Matrix& sigmaX, const Vector& vecX, + const Matrix& sigmaY, const Vector& vecY) + { + Vector devXi{util::getColumnAt(0, sigmaX) - + vecX}; // X[:, 0] - \bar{ x } + Vector devYi{util::getColumnAt(0, sigmaY) - + vecY}; // Y[:, 0] - \bar{ y } + + // P_0 = W[0, 0] (X[:, 0] - \bar{x}) (Y[:, 0] - \bar{y})^T + Matrix matPxy{m_weight0 * (devXi * devYi.transpose())}; + + for (int32_t i{1}; i < SIGMA_DIM; ++i) { - public: - static constexpr int32_t DIM_A{ DIM_X + DIM_V + DIM_N }; - static constexpr int32_t SIGMA_DIM{ 2 * DIM_A + 1 }; - - UnscentedKalmanFilter() - : KalmanFilter() - { - // 1. calculate weights - const float32_t kappa{ static_cast(3 - DIM_A) }; - updateWeights(kappa); - } - - ~UnscentedKalmanFilter() {} - - /// - /// @brief adding process noise covariance Q to the augmented state covariance matPa - /// in the middle element of the diagonal. - /// - void setCovarianceQ(const Matrix & matQ) - { - const int32_t S_IDX{ DIM_X }; - const int32_t L_IDX{ S_IDX + DIM_V }; - - for (int32_t i{ S_IDX }; i < L_IDX; ++i) - { - for (int32_t j{ S_IDX }; j < L_IDX; ++j) - { - m_matPa(i, j) = matQ(i - S_IDX, j - S_IDX); - } - } - } - - /// - /// @brief adding measurement noise covariance R to the augmented state covariance matPa - /// in the third element of the diagonal. - /// - void setCovarianceR(const Matrix & matR) - { - const int32_t S_IDX{ DIM_X + DIM_V }; - const int32_t L_IDX{ S_IDX + DIM_N }; - - for (int32_t i{ S_IDX }; i < L_IDX; ++i) - { - for (int32_t j{ S_IDX }; j < L_IDX; ++j) - { - m_matPa(i, j) = matR(i - S_IDX, j - S_IDX); - } - } - } - - /// - /// @brief adding vecX and matP to the augmented state vector and covariance vecXa and matPa - /// - void updateAugmentedStateAndCovariance() - { - updateAugmentedVecX(); - updateAugmentedMatP(); - } - - /// - /// @brief state prediction step of the unscented Kalman filter (UKF). 
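// A compact standalone sketch (2-D state, kappa = 1, numbers illustrative) of
// the sigma-point construction used by calculateSigmaPoints above: the 2n+1
// points are the mean plus/minus the columns of the scaled Cholesky factor
// sqrt((n + kappa) * P), weighted by W0 = kappa/(n+kappa) and Wi = 0.5/(n+kappa).
#include <Eigen/Dense>
#include <cmath>

int main()
{
    constexpr int n = 2;
    const float kappa = 1.0F;

    Eigen::Vector2f x(1.0F, -0.5F);                    // state mean
    Eigen::Matrix2f P; P << 0.4F, 0.1F, 0.1F, 0.3F;    // state covariance

    const Eigen::Matrix2f S = P.llt().matrixL();       // lower Cholesky factor of P
    const Eigen::Matrix2f A = std::sqrt(n + kappa) * S;

    Eigen::Matrix<float, n, 2 * n + 1> sigma;
    sigma.col(0) = x;                                  // X_0 = mean
    for (int i = 0; i < n; ++i)
    {
        sigma.col(i + 1) = x + A.col(i);               // X_i     = mean + column i
        sigma.col(i + 1 + n) = x - A.col(i);           // X_{i+n} = mean - column i
    }

    const float w0 = kappa / (n + kappa);              // weight of the mean point
    const float wi = 0.5F / (n + kappa);               // weight of each remaining point
    // w0 + 2 * n * wi == 1; note that with the kappa = 3 - n heuristic used in
    // this header, w0 becomes negative once n exceeds 3.
    (void)w0; (void)wi;
    return 0;
}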
- /// @param predictionModelFunc callback to the prediction/process model function - /// - template - void predictUKF(PredictionModelCallback predictionModelFunc) - { - // self.x_a[:self.dim_x] = x - // self.P_a[:self.dim_x, : self.dim_x] = P - updateAugmentedStateAndCovariance(); - - const float32_t kappa{ static_cast(3 - DIM_A) }; - - // xa_sigmas = self.sigma_points(self.x_a, self.P_a) - Matrix matSigmaXa{ calculateSigmaPoints(m_vecXa, m_matPa, kappa) }; - - // xx_sigmas = xa_sigmas[:self.dim_x, :] - //Matrix sigmaXx{ matSigmaXa.block(0, 0) }; - Matrix sigmaXx{ matSigmaXa.block(0, 0, DIM_X, SIGMA_DIM) }; - - // xv_sigmas = xa_sigmas[self.dim_x : self.dim_x + self.dim_v, :] - Matrix sigmaXv{ matSigmaXa.block(DIM_X, 0, DIM_V, SIGMA_DIM) }; - - // y_sigmas = np.zeros((self.dim_x, self.n_sigma)) - // for i in range(self.n_sigma): - // y_sigmas[:, i] = f(xx_sigmas[:, i], xv_sigmas[:, i]) - for (int32_t i{ 0 }; i < SIGMA_DIM; ++i) - { - const Vector sigmaXxi{ util::getColumnAt(i, sigmaXx) }; - const Vector sigmaXvi{ util::getColumnAt(i, sigmaXv) }; - - const Vector Yi{ predictionModelFunc(sigmaXxi, sigmaXvi) }; // y = f(x) - - util::copyToColumn(i, sigmaXx, Yi); // Y[:, i] = y - } - - // y, Pyy = self.calculate_mean_and_covariance(y_sigmas) - calculateWeightedMeanAndCovariance(sigmaXx, m_vecX, m_matP); - - //// self.x_a[:self.dim_x] = y - //// self.P_a[:self.dim_x, : self.dim_x] = Pyy - //updateAugmentedStateAndCovariance(); - } - - /// - /// @brief measurement correction step of the unscented Kalman filter (UKF). - /// @param measurementModelFunc callback to the measurement model function - /// @param vecZ actual measurement vector. - /// - template - void correctUKF(MeasurementModelCallback measurementModelFunc, const Vector & vecZ) - { - // self.x_a[:self.dim_x] = x - // self.P_a[:self.dim_x, : self.dim_x] = P - updateAugmentedStateAndCovariance(); - - const float32_t kappa{ static_cast(3 - DIM_A) }; - - // xa_sigmas = self.sigma_points(self.x_a, self.P_a) - Matrix matSigmaXa{ calculateSigmaPoints(m_vecXa, m_matPa, kappa) }; - - // xx_sigmas = xa_sigmas[:self.dim_x, :] - Matrix sigmaXx{ matSigmaXa.block(0, 0, DIM_X, SIGMA_DIM) }; - - // xn_sigmas = xa_sigmas[self.dim_x + self.dim_v :, :] - Matrix sigmaXn{ matSigmaXa.block(DIM_X + DIM_V, 0, DIM_N, SIGMA_DIM) }; - - // y_sigmas = np.zeros((self.dim_z, self.n_sigma)) - // for i in range(self.n_sigma) : - // y_sigmas[:, i] = h(xx_sigmas[:, i], xn_sigmas[:, i]) - Matrix sigmaY; - for (int32_t i{ 0 }; i < SIGMA_DIM; ++i) - { - const Vector sigmaXxi{ util::getColumnAt(i, sigmaXx) }; - const Vector sigmaXni{ util::getColumnAt(i, sigmaXn) }; - - const Vector Yi{ measurementModelFunc(sigmaXxi, sigmaXni) }; // y = f(x) - - util::copyToColumn(i, sigmaY, Yi); // Y[:, i] = y - } - - // y, Pyy = self.calculate_mean_and_covariance(y_sigmas) - Vector vecY; - Matrix matPyy; - calculateWeightedMeanAndCovariance(sigmaY, vecY, matPyy); - - // TODO: calculate cross correlation - const Matrix matPxy{ calculateCrossCorrelation(sigmaXx, m_vecX, sigmaY, vecY) }; - - // kalman gain - const Matrix matK{ matPxy * matPyy.inverse() }; - - m_vecX += matK * (vecZ - vecY); - m_matP -= matK * matPyy * matK.transpose(); - - //// self.x_a[:self.dim_x] = x - //// self.P_a[:self.dim_x, : self.dim_x] = P - //updateAugmentedStateAndCovariance(); - } - - private: - using KalmanFilter::m_vecX; // from Base KalmanFilter class - using KalmanFilter::m_matP; // from Base KalmanFilter class - - float32_t m_weight0; /// @brief unscented transform weight 0 for mean - float32_t m_weighti; 
/// @brief unscented transform weight i for none mean samples - - Vector m_vecXa{ Vector::Zero() }; /// @brief augmented state vector (incl. process and measurement noise means) - Matrix m_matPa{ Matrix::Zero() }; /// @brief augmented state covariance (incl. process and measurement noise covariances) - - /// - /// @brief add state vector m_vecX to the augment state vector m_vecXa - /// - void updateAugmentedVecX() - { - for (int32_t i{ 0 }; i < DIM_X; ++i) - { - m_vecXa[i] = m_vecX[i]; - } - } - - /// - /// @brief add covariance matrix m_matP to the augment covariance m_matPa - /// - void updateAugmentedMatP() - { - for (int32_t i{ 0 }; i < DIM_X; ++i) - { - for (int32_t j{ 0 }; j < DIM_X; ++j) - { - m_matPa(i, j) = m_matP(i, j); - } - } - } - - /// - /// @brief algorithm to calculate the weights used to draw the sigma points - /// @param kappa design scaling parameter for sigma points selection - /// - void updateWeights(float32_t kappa) - { - static_assert(DIM_A > 0, "DIM_A is Zero which leads to numerical issue."); - - const float32_t denoTerm{ kappa + static_cast(DIM_A) }; - - m_weight0 = kappa / denoTerm; - m_weighti = 0.5F / denoTerm; - } - - /// - /// @brief algorithm to calculate the deterministic sigma points for - /// the unscented transformation - /// - /// @param vecX mean of the normally distributed state - /// @param matPxx covariance of the normally distributed state - /// @param kappa design scaling parameter for sigma points selection - /// - Matrix calculateSigmaPoints(const Vector & vecXa, const Matrix & matPa, const float32_t kappa) - { - const float32_t scalarMultiplier{ std::sqrt(DIM_A + kappa) }; // sqrt(n + \kappa) - - // cholesky factorization to get matrix Pxx square-root - Eigen::LLT> lltOfPa(matPa); - Matrix matSa{ lltOfPa.matrixL() }; // sqrt(P_{a}) - - matSa *= scalarMultiplier; // sqrt( (n + \kappa) * P_{a} ) - - Matrix sigmaXa; - - // X_0 = \bar{xa} - util::copyToColumn< DIM_A, SIGMA_DIM >(0, sigmaXa, vecXa); - - for (int32_t i{ 0 }; i < DIM_A; ++i) - { - const int32_t IDX_1{ i + 1 }; - const int32_t IDX_2{ i + DIM_A + 1 }; - - util::copyToColumn< DIM_A, SIGMA_DIM >(IDX_1, sigmaXa, vecXa); - util::copyToColumn< DIM_A, SIGMA_DIM >(IDX_2, sigmaXa, vecXa); - - const Vector vecShiftTerm{ util::getColumnAt< DIM_A, DIM_A >(i, matSa) }; - - util::addColumnFrom< DIM_A, SIGMA_DIM >(IDX_1, sigmaXa, vecShiftTerm); // X_i^a = \bar{xa} + sqrt( (n^a + \kappa) * P^{a} ) - util::subColumnFrom< DIM_A, SIGMA_DIM >(IDX_2, sigmaXa, vecShiftTerm); // X_{i+n}^a = \bar{xa} - sqrt( (n^a + \kappa) * P^{a} ) - } - - return sigmaXa; - } - - /// - /// @brief calculate the weighted mean and covariance given a set of sigma points - /// @param sigmaX matrix of sigma points where each column contain single sigma point - /// @param vecX output weighted mean - /// @param matP output weighted covariance - /// - template - void calculateWeightedMeanAndCovariance(const Matrix & sigmaX, Vector & vecX, Matrix & matPxx) - { - // 1. calculate mean: \bar{y} = \sum_{i_0}^{2n} W[0, i] Y[:, i] - vecX = m_weight0 * util::getColumnAt(0, sigmaX); - for (int32_t i{ 1 }; i < SIGMA_DIM; ++i) - { - vecX += m_weighti * util::getColumnAt(i, sigmaX); // y += W[0, i] Y[:, i] - } - - // 2. 
calculate covariance: P_{yy} = \sum_{i_0}^{2n} W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - Vector devXi{ util::getColumnAt(0, sigmaX) - vecX }; // Y[:, 0] - \bar{ y } - matPxx = m_weight0 * devXi * devXi.transpose(); // P_0 = W[0, 0] (Y[:, 0] - \bar{y}) (Y[:, 0] - \bar{y})^T - - for (int32_t i{ 1 }; i < SIGMA_DIM; ++i) - { - devXi = util::getColumnAt(i, sigmaX) - vecX; // Y[:, i] - \bar{y} - - const Matrix Pi{ m_weighti * devXi * devXi.transpose() }; // P_i = W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - - matPxx += Pi; // y += W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - } - } - - /// - /// @brief calculate the cross-correlation given two sets sigma points X and Y and their means x and y - /// @param sigmaX first matrix of sigma points where each column contain single sigma point - /// @param vecX mean of the first set of sigma points - /// @param sigmaY second matrix of sigma points where each column contain single sigma point - /// @param vecY mean of the second set of sigma points - /// @return matPxy, the cross-correlation matrix - /// - Matrix calculateCrossCorrelation( - const Matrix & sigmaX, const Vector & vecX, - const Matrix & sigmaY, const Vector & vecY) - { - Vector devXi{ util::getColumnAt(0, sigmaX) - vecX }; // X[:, 0] - \bar{ x } - Vector devYi{ util::getColumnAt(0, sigmaY) - vecY }; // Y[:, 0] - \bar{ y } - - // P_0 = W[0, 0] (X[:, 0] - \bar{x}) (Y[:, 0] - \bar{y})^T - Matrix matPxy{ - m_weight0 * (devXi * devYi.transpose()) - }; - - for (int32_t i{ 1 }; i < SIGMA_DIM; ++i) - { - devXi = util::getColumnAt(i, sigmaX) - vecX; // X[:, i] - \bar{x} - devYi = util::getColumnAt(i, sigmaY) - vecY; // Y[:, i] - \bar{y} - - matPxy += m_weighti * (devXi * devYi.transpose()); // y += W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - } - - return matPxy; - } - }; -} - -#endif // UNSCENTED_KALMAN_FILTER_LIB_H + devXi = util::getColumnAt(i, sigmaX) - + vecX; // X[:, i] - \bar{x} + devYi = util::getColumnAt(i, sigmaY) - + vecY; // Y[:, i] - \bar{y} + + matPxy += m_weighti * + (devXi * devYi.transpose()); // y += W[0, i] (Y[:, i] - + // \bar{y}) (Y[:, i] - \bar{y})^T + } + + return matPxy; + } +}; +} // namespace kf + +#endif // UNSCENTED_KALMAN_FILTER_LIB_H diff --git a/src/openkf/kalman_filter/unscented_transform.h b/src/openkf/kalman_filter/unscented_transform.h index 5615263..25bf913 100644 --- a/src/openkf/kalman_filter/unscented_transform.h +++ b/src/openkf/kalman_filter/unscented_transform.h @@ -12,200 +12,236 @@ #ifndef UNSCENTED_TRANSFORM_H #define UNSCENTED_TRANSFORM_H +#include "Eigen/Dense" +#include "Eigen/src/Cholesky/LLT.h" #include "types.h" #include "util.h" -#include "Eigen/src/Cholesky/LLT.h" +#include namespace kf { - template - class UnscentedTransform +template +class UnscentedTransform +{ + public: + static constexpr int32_t SIGMA_DIM{(2 * DIM) + 1}; + + UnscentedTransform() {} + ~UnscentedTransform() {} + + float32_t weight0() const { return _weights[0]; } + float32_t weighti() const { return _weights[1]; } + + /// + /// @brief algorithm to execute weight and sigma points calculation + /// @param vecX input mean vector + /// @param matPxx input covariance matrix + /// @param kappa design parameter + /// + void compute(const Vector& vecX, const Matrix& matPxx, + const float32_t kappa = 0.0F) + { + // 1. calculate weights + updateWeights(kappa); + + // 2. 
update sigma points _sigmaX + updateSigmaPoints(vecX, matPxx, kappa); + } + + template + void calculateWeightedMeanAndCovariance( + const Matrix& sigmaX, Vector& vecX, + Matrix& matPxx) + { + // 1. calculate mean: \bar{y} = \sum_{i_0}^{2n} W[0, i] Y[:, i] + vecX = _weights[0] * util::getColumnAt(0, sigmaX); + for (int32_t i{1}; i < SIGMA_DIM; ++i) + { + vecX += _weights[1] * util::getColumnAt( + i, sigmaX); // y += W[0, i] Y[:, i] + } + + // 2. calculate covariance: P_{yy} = \sum_{i_0}^{2n} W[0, i] (Y[:, i] - + // \bar{y}) (Y[:, i] - \bar{y})^T + Vector devXi{util::getColumnAt(0, sigmaX) - + vecX}; // Y[:, 0] - \bar{ y } + matPxx = _weights[0] * devXi * + devXi.transpose(); // P_0 = W[0, 0] (Y[:, 0] - \bar{y}) (Y[:, 0] - + // \bar{y})^T + + for (int32_t i{1}; i < SIGMA_DIM; ++i) + { + devXi = util::getColumnAt(i, sigmaX) - + vecX; // Y[:, i] - \bar{y} + + const Matrix Pi{ + _weights[1] * devXi * + devXi.transpose()}; // P_i = W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - + // \bar{y})^T + + matPxx += Pi; // y += W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T + } + } + + /// + /// @brief algorithm to execute transforming sigma points through nonlinear + /// function and calculate the updated weights and covariance. + /// @param nonlinearFunction nonlinear function callback + /// @param vecY output mean vector (weighted mean vector) + /// @param matPyy output covariance matrix (weighted covariance matrix) + /// + template + void transform(NonLinearFunctionCallback nonlinearFunction, Vector& vecY, + Matrix& matPyy) + { + Matrix sigmaY; + + // 3. transform sigma points X through the nonlinear model f(X) to obtain Y + // sigma points + transformSigmaPoints(nonlinearFunction, sigmaY); + + // 4. calculate weight mean and covariance from the transformed sigma points + // Y and weights + updateTransformedMeanAndCovariance(sigmaY, vecY, matPyy); + } + + void showSummary() + { + std::cout << "DIM_N : " << DIM << "\n"; + std::cout << "DIM_M : " << SIGMA_DIM << "\n"; + std::cout << "_weights[0] : \n" << _weights[0] << "\n"; + std::cout << "_weights[1] : \n" << _weights[1] << "\n"; + std::cout << "_sigmaX : \n" << _sigmaX << "\n"; + } + + Matrix<1, 2>& weights() { return _weights; } + const Matrix<1, 2>& weights() const { return _weights; } + + Matrix& sigmaX() { return _sigmaX; } + const Matrix& sigmaX() const { return _sigmaX; } + + private: + /// @brief unscented transform weight 0 for mean + Matrix<1, 2> _weights; + + /// @brief input sigma points + Matrix _sigmaX; + + /// + /// @brief algorithm to calculate the weights used to draw the sigma points + /// @param kappa design scaling parameter for sigma points selection + /// + void updateWeights(float32_t kappa) + { + static_assert(DIM > 0, "DIM is Zero which leads to numerical issue."); + + const float32_t denoTerm{kappa + static_cast(DIM)}; + + _weights[0] = kappa / denoTerm; + _weights[1] = 0.5F / denoTerm; + } + + /// + /// @brief algorithm to calculate the deterministic sigma points for + /// the unscented transformation + /// + /// @param vecX mean of the normally distributed state + /// @param matPxx covariance of the normally distributed state + /// @param kappa design scaling parameter for sigma points selection + /// + void updateSigmaPoints(const Vector& vecX, + const Matrix& matPxx, const float32_t kappa) + { + const float32_t scalarMultiplier{ + std::sqrt(DIM + kappa)}; // sqrt(n + \kappa) + + // cholesky factorization to get matrix Pxx square-root + Eigen::LLT> lltOfPxx(matPxx); + Matrix matSxx{lltOfPxx.matrixL()}; // sqrt(P_{xx}) + + 
matSxx *= scalarMultiplier; // sqrt( (n + \kappa) * P_{xx} ) + + // X_0 = \bar{x} + util::copyToColumn(0, _sigmaX, vecX); + + for (int32_t i{0}; i < DIM; ++i) + { + const int32_t IDX_1{i + 1}; + const int32_t IDX_2{i + DIM + 1}; + + util::copyToColumn(IDX_1, _sigmaX, vecX); + util::copyToColumn(IDX_2, _sigmaX, vecX); + + const Vector vecShiftTerm{util::getColumnAt(i, matSxx)}; + + util::addColumnFrom( + IDX_1, _sigmaX, + vecShiftTerm); // X_i = \bar{x} + sqrt( (n + \kappa) * P_{xx} ) + util::subColumnFrom( + IDX_2, _sigmaX, + vecShiftTerm); // X_{i+n} = \bar{x} - sqrt( (n + \kappa) * P_{xx} ) + } + } + + /// + /// @brief transform sigma points X through the nonlinear function + /// @param nonlinearFunction nonlinear function to be used + /// @param sigmaY output transformed sigma points from the nonlinear function + /// + template + void transformSigmaPoints(NonLinearFunctionCallback nonlinearFunction, + Matrix& sigmaY) + { + for (int32_t i{0}; i < SIGMA_DIM; ++i) { - public: - static constexpr int32_t SIGMA_DIM{ (2 * DIM) + 1 }; - - UnscentedTransform() {} - ~UnscentedTransform() {} - - float32_t weight0() const { return _weights[0]; } - float32_t weighti() const { return _weights[1]; } - - /// - /// @brief algorithm to execute weight and sigma points calculation - /// @param vecX input mean vector - /// @param matPxx input covariance matrix - /// @param kappa design parameter - /// - void compute(const Vector & vecX, const Matrix & matPxx, const float32_t kappa = 0.0F) - { - // 1. calculate weights - updateWeights(kappa); - - // 2. update sigma points _sigmaX - updateSigmaPoints(vecX, matPxx, kappa); - } - - template - void calculateWeightedMeanAndCovariance(const Matrix & sigmaX, Vector & vecX, Matrix & matPxx) - { - // 1. calculate mean: \bar{y} = \sum_{i_0}^{2n} W[0, i] Y[:, i] - vecX = _weights[0] * util::getColumnAt(0, sigmaX); - for (int32_t i{ 1 }; i < SIGMA_DIM; ++i) - { - vecX += _weights[1] * util::getColumnAt(i, sigmaX); // y += W[0, i] Y[:, i] - } - - // 2. calculate covariance: P_{yy} = \sum_{i_0}^{2n} W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - Vector devXi{ util::getColumnAt(0, sigmaX) - vecX }; // Y[:, 0] - \bar{ y } - matPxx = _weights[0] * devXi * devXi.transpose(); // P_0 = W[0, 0] (Y[:, 0] - \bar{y}) (Y[:, 0] - \bar{y})^T - - for (int32_t i{ 1 }; i < SIGMA_DIM; ++i) - { - devXi = util::getColumnAt(i, sigmaX) - vecX; // Y[:, i] - \bar{y} - - const Matrix Pi{ _weights[1] * devXi * devXi.transpose() }; // P_i = W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - - matPxx += Pi; // y += W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - } - } - - /// - /// @brief algorithm to execute transforming sigma points through nonlinear function - /// and calculate the updated weights and covariance. - /// @param nonlinearFunction nonlinear function callback - /// @param vecY output mean vector (weighted mean vector) - /// @param matPyy output covariance matrix (weighted covariance matrix) - /// - template - void transform(NonLinearFunctionCallback nonlinearFunction, Vector & vecY, Matrix & matPyy) - { - Matrix sigmaY; - - // 3. transform sigma points X through the nonlinear model f(X) to obtain Y sigma points - transformSigmaPoints(nonlinearFunction, sigmaY); - - // 4. 
calculate weight mean and covariance from the transformed sigma points Y and weights - updateTransformedMeanAndCovariance(sigmaY, vecY, matPyy); - } - - void showSummary() - { - std::cout << "DIM_N : " << DIM << "\n"; - std::cout << "DIM_M : " << SIGMA_DIM << "\n"; - std::cout << "_weights[0] : \n" << _weights[0] << "\n"; - std::cout << "_weights[1] : \n" << _weights[1] << "\n"; - std::cout << "_sigmaX : \n" << _sigmaX << "\n"; - } - - Matrix<1, 2> & weights() { return _weights; } - const Matrix<1, 2> & weights() const { return _weights; } - - Matrix & sigmaX() { return _sigmaX; } - const Matrix & sigmaX() const { return _sigmaX; } - - private: - /// @brief unscented transform weight 0 for mean - Matrix<1, 2> _weights; - - /// @brief input sigma points - Matrix _sigmaX; - - /// - /// @brief algorithm to calculate the weights used to draw the sigma points - /// @param kappa design scaling parameter for sigma points selection - /// - void updateWeights(float32_t kappa) - { - static_assert(DIM > 0, "DIM is Zero which leads to numerical issue."); - - const float32_t denoTerm{ kappa + static_cast(DIM) }; - - _weights[0] = kappa / denoTerm; - _weights[1] = 0.5F / denoTerm; - } - - /// - /// @brief algorithm to calculate the deterministic sigma points for - /// the unscented transformation - /// - /// @param vecX mean of the normally distributed state - /// @param matPxx covariance of the normally distributed state - /// @param kappa design scaling parameter for sigma points selection - /// - void updateSigmaPoints(const Vector & vecX, const Matrix & matPxx, const float32_t kappa) - { - const float32_t scalarMultiplier{ std::sqrt(DIM + kappa) }; // sqrt(n + \kappa) - - // cholesky factorization to get matrix Pxx square-root - Eigen::LLT> lltOfPxx(matPxx); - Matrix matSxx{ lltOfPxx.matrixL() }; // sqrt(P_{xx}) - - matSxx *= scalarMultiplier; // sqrt( (n + \kappa) * P_{xx} ) - - // X_0 = \bar{x} - util::copyToColumn< DIM, SIGMA_DIM >(0, _sigmaX, vecX); - - for (int32_t i{ 0 }; i < DIM; ++i) - { - const int32_t IDX_1{ i + 1 }; - const int32_t IDX_2{ i + DIM + 1 }; - - util::copyToColumn< DIM, SIGMA_DIM >(IDX_1, _sigmaX, vecX); - util::copyToColumn< DIM, SIGMA_DIM >(IDX_2, _sigmaX, vecX); - - const Vector vecShiftTerm{ util::getColumnAt< DIM, DIM >(i, matSxx) }; - - util::addColumnFrom< DIM, SIGMA_DIM >(IDX_1, _sigmaX, vecShiftTerm); // X_i = \bar{x} + sqrt( (n + \kappa) * P_{xx} ) - util::subColumnFrom< DIM, SIGMA_DIM >(IDX_2, _sigmaX, vecShiftTerm); // X_{i+n} = \bar{x} - sqrt( (n + \kappa) * P_{xx} ) - } - } - - /// - /// @brief transform sigma points X through the nonlinear function - /// @param nonlinearFunction nonlinear function to be used - /// @param sigmaY output transformed sigma points from the nonlinear function - /// - template - void transformSigmaPoints(NonLinearFunctionCallback nonlinearFunction, Matrix & sigmaY) - { - for (int32_t i{ 0 }; i < SIGMA_DIM; ++i) - { - const Vector x{ util::getColumnAt(i, _sigmaX) }; - const Vector y{ nonlinearFunction(x) }; // y = f(x) - - util::copyToColumn(i, sigmaY, y); // Y[:, i] = y - } - } - - /// - /// @brief calculate weighted mean and covariance given a set of sigma points with associated weights. - /// @param sigmaY input transformed sigma points from the nonlinear function - /// @param vecY output weighted mean vector - /// @param matPyy output weighted covariance matrix - /// - void updateTransformedMeanAndCovariance(const Matrix & sigmaY, Vector & vecY, Matrix & matPyy) - { - // 1. 
calculate mean: \bar{y} = \sum_{i_0}^{2n} W[0, i] Y[:, i] - vecY = _weights[0] * util::getColumnAt(0, sigmaY); - for (int32_t i{ 1 }; i < SIGMA_DIM; ++i) - { - vecY += _weights[1] * util::getColumnAt(i, sigmaY); // y += W[0, i] Y[:, i] - } - - // 2. calculate covariance: P_{yy} = \sum_{i_0}^{2n} W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - Vector devYi{ util::getColumnAt(0, sigmaY) - vecY }; // Y[:, 0] - \bar{ y } - matPyy = _weights[0] * devYi * devYi.transpose(); // P_0 = W[0, 0] (Y[:, 0] - \bar{y}) (Y[:, 0] - \bar{y})^T - - for (int32_t i{ 1 }; i < SIGMA_DIM; ++i) - { - devYi = util::getColumnAt(i, sigmaY) - vecY; // Y[:, i] - \bar{y} - - const Matrix Pi{ _weights[1] * devYi * devYi.transpose() }; // P_i = W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - - matPyy += Pi; // y += W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T - } - } - }; -} - -#endif // UNSCENTED_TRANSFORM_H + const Vector x{util::getColumnAt(i, _sigmaX)}; + const Vector y{nonlinearFunction(x)}; // y = f(x) + + util::copyToColumn(i, sigmaY, y); // Y[:, i] = y + } + } + + /// + /// @brief calculate weighted mean and covariance given a set of sigma points + /// with associated weights. + /// @param sigmaY input transformed sigma points from the nonlinear function + /// @param vecY output weighted mean vector + /// @param matPyy output weighted covariance matrix + /// + void updateTransformedMeanAndCovariance(const Matrix& sigmaY, + Vector& vecY, + Matrix& matPyy) + { + // 1. calculate mean: \bar{y} = \sum_{i_0}^{2n} W[0, i] Y[:, i] + vecY = _weights[0] * util::getColumnAt(0, sigmaY); + for (int32_t i{1}; i < SIGMA_DIM; ++i) + { + vecY += _weights[1] * util::getColumnAt( + i, sigmaY); // y += W[0, i] Y[:, i] + } + + // 2. calculate covariance: P_{yy} = \sum_{i_0}^{2n} W[0, i] (Y[:, i] - + // \bar{y}) (Y[:, i] - \bar{y})^T + Vector devYi{util::getColumnAt(0, sigmaY) - + vecY}; // Y[:, 0] - \bar{ y } + matPyy = _weights[0] * devYi * + devYi.transpose(); // P_0 = W[0, 0] (Y[:, 0] - \bar{y}) (Y[:, 0] - + // \bar{y})^T + + for (int32_t i{1}; i < SIGMA_DIM; ++i) + { + devYi = util::getColumnAt(i, sigmaY) - + vecY; // Y[:, i] - \bar{y} + + const Matrix Pi{ + _weights[1] * devYi * + devYi.transpose()}; // P_i = W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - + // \bar{y})^T + + matPyy += Pi; // y += W[0, i] (Y[:, i] - \bar{y}) (Y[:, i] - \bar{y})^T + } + } +}; +} // namespace kf + +#endif // UNSCENTED_TRANSFORM_H diff --git a/src/openkf/motion_model/ego_motion_model.cpp b/src/openkf/motion_model/ego_motion_model.cpp index cdbb4e8..0cbf89a 100644 --- a/src/openkf/motion_model/ego_motion_model.cpp +++ b/src/openkf/motion_model/ego_motion_model.cpp @@ -2,198 +2,208 @@ namespace kf { - namespace motionmodel - { - Vector EgoMotionModel::f(Vector const & vecX, Vector const & vecU, Vector const & vecQ) const - { - Vector vecXout; - float32_t & oPosX{ vecXout[0] }; - float32_t & oPosY{ vecXout[1] }; - float32_t & oYaw { vecXout[2] }; - - float32_t const & iPosX{ vecX[0] }; - float32_t const & iPosY{ vecX[1] }; - float32_t const & iYaw { vecX[2] }; - - float32_t const & qPosX{ vecQ[0] }; - float32_t const & qPosY{ vecQ[1] }; - float32_t const & qYaw { vecQ[2] }; - - float32_t const & deltaDist{ vecU[0] }; - float32_t const & deltaYaw { vecU[1] }; - - float32_t const halfDeltaYaw{ deltaYaw / 2.0F }; - float32_t const iYawPlusHalfDeltaYaw{ iYaw + halfDeltaYaw }; - - oPosX = iPosX + (deltaDist * std::cos(iYawPlusHalfDeltaYaw)) + qPosX; - oPosY = iPosY + (deltaDist * std::sin(iYawPlusHalfDeltaYaw)) + qPosY; - oYaw = iYaw + deltaYaw + qYaw; 
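// A quick standalone numeric check (values purely illustrative) of the
// midpoint-heading update implemented here: starting from
// (x, y, yaw) = (0, 0, 0) and applying delta_dist = 1 m, delta_yaw = pi/2,
// the pose advances along the average heading pi/4, giving roughly
// (0.707, 0.707, 1.571).
#include <array>
#include <cmath>
#include <cstdio>

int main()
{
    std::array<float, 3> x{0.0F, 0.0F, 0.0F};  // [x, y, yaw]
    const float deltaDist{1.0F};
    const float deltaYaw{3.14159265F / 2.0F};

    const float midYaw{x[2] + (deltaYaw / 2.0F)};
    x[0] += deltaDist * std::cos(midYaw);
    x[1] += deltaDist * std::sin(midYaw);
    x[2] += deltaYaw;

    std::printf("%.3f %.3f %.3f\n", x[0], x[1], x[2]);  // prints ~0.707 0.707 1.571
    return 0;
}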
- - return vecXout; - } - - Matrix EgoMotionModel::getProcessNoiseCov(Vector const & vecX, Vector const & vecU) const - { - Matrix matQk; - - float32_t & q11 = matQk(0, 0); - float32_t & q12 = matQk(0, 1); - float32_t & q13 = matQk(0, 2); - - float32_t & q21 = matQk(1, 0); - float32_t & q22 = matQk(1, 1); - float32_t & q23 = matQk(1, 2); - - float32_t & q31 = matQk(2, 0); - float32_t & q32 = matQk(2, 1); - float32_t & q33 = matQk(2, 2); - - float32_t const & iYaw { vecX[2] }; +namespace motionmodel +{ +Vector EgoMotionModel::f(Vector const& vecX, + Vector const& vecU, + Vector const& vecQ) const +{ + Vector vecXout; + float32_t& oPosX{vecXout[0]}; + float32_t& oPosY{vecXout[1]}; + float32_t& oYaw{vecXout[2]}; - float32_t const & deltaDist{ vecU[0] }; - float32_t const & deltaYaw { vecU[1] }; + float32_t const& iPosX{vecX[0]}; + float32_t const& iPosY{vecX[1]}; + float32_t const& iYaw{vecX[2]}; - float32_t const deltaDistSquare{ deltaDist * deltaDist }; - float32_t const halfDeltaYaw{ deltaYaw / 2.0F }; - float32_t const iYawPlusHalfDeltaYaw{ iYaw + halfDeltaYaw }; - float32_t const sinTheta{ std::sin(iYawPlusHalfDeltaYaw) }; - float32_t const cosTheta{ std::cos(iYawPlusHalfDeltaYaw) }; - float32_t const sinCosTheta{ sinTheta * cosTheta }; + float32_t const& qPosX{vecQ[0]}; + float32_t const& qPosY{vecQ[1]}; + float32_t const& qYaw{vecQ[2]}; - q11 = m_qX + (deltaDistSquare * sinTheta * sinTheta * m_qTheta); - q12 = -deltaDistSquare * sinCosTheta * m_qTheta; - q13 = -deltaDist * sinTheta * m_qTheta; + float32_t const& deltaDist{vecU[0]}; + float32_t const& deltaYaw{vecU[1]}; - q21 = -deltaDistSquare * sinCosTheta * m_qTheta; - q22 = m_qY + (deltaDistSquare * cosTheta * cosTheta * m_qTheta); - q23 = deltaDist * cosTheta * m_qTheta; + float32_t const halfDeltaYaw{deltaYaw / 2.0F}; + float32_t const iYawPlusHalfDeltaYaw{iYaw + halfDeltaYaw}; - q31 = -deltaDist * sinTheta * m_qTheta; - q32 = deltaDist * cosTheta * m_qTheta; - q33 = m_qTheta; + oPosX = iPosX + (deltaDist * std::cos(iYawPlusHalfDeltaYaw)) + qPosX; + oPosY = iPosY + (deltaDist * std::sin(iYawPlusHalfDeltaYaw)) + qPosY; + oYaw = iYaw + deltaYaw + qYaw; - return matQk; - } + return vecXout; +} - Matrix EgoMotionModel::getInputNoiseCov(Vector const &vecX, Vector const &vecU) const - { - Matrix matUk; +Matrix EgoMotionModel::getProcessNoiseCov( + Vector const& vecX, Vector const& vecU) const +{ + Matrix matQk; - float32_t & u11 = matUk(0, 0); - float32_t & u12 = matUk(0, 1); - float32_t & u13 = matUk(0, 2); + float32_t& q11 = matQk(0, 0); + float32_t& q12 = matQk(0, 1); + float32_t& q13 = matQk(0, 2); - float32_t & u21 = matUk(1, 0); - float32_t & u22 = matUk(1, 1); - float32_t & u23 = matUk(1, 2); + float32_t& q21 = matQk(1, 0); + float32_t& q22 = matQk(1, 1); + float32_t& q23 = matQk(1, 2); - float32_t & u31 = matUk(2, 0); - float32_t & u32 = matUk(2, 1); - float32_t & u33 = matUk(2, 2); + float32_t& q31 = matQk(2, 0); + float32_t& q32 = matQk(2, 1); + float32_t& q33 = matQk(2, 2); - float32_t const & iYaw { vecX[2] }; + float32_t const& iYaw{vecX[2]}; - float32_t const & deltaDist{ vecU[0] }; - float32_t const & deltaYaw { vecU[1] }; + float32_t const& deltaDist{vecU[0]}; + float32_t const& deltaYaw{vecU[1]}; - float32_t const deltaDistSquare{ deltaDist * deltaDist }; - float32_t const halfDeltaDistSquare{ deltaDist / 2.0F }; - float32_t const quarterDeltaDistSquare{ deltaDistSquare / 4.0F }; - float32_t const halfDeltaYaw{ deltaYaw / 2.0F }; - float32_t const iYawPlusHalfDeltaYaw{ iYaw + halfDeltaYaw }; - float32_t const sinTheta{ 
std::sin(iYawPlusHalfDeltaYaw) }; - float32_t const cosTheta{ std::cos(iYawPlusHalfDeltaYaw) }; - float32_t const sin2Theta{ sinTheta * sinTheta }; - float32_t const cos2Theta{ cosTheta * cosTheta }; - float32_t const sinCosTheta{ sinTheta * cosTheta }; + float32_t const deltaDistSquare{deltaDist * deltaDist}; + float32_t const halfDeltaYaw{deltaYaw / 2.0F}; + float32_t const iYawPlusHalfDeltaYaw{iYaw + halfDeltaYaw}; + float32_t const sinTheta{std::sin(iYawPlusHalfDeltaYaw)}; + float32_t const cosTheta{std::cos(iYawPlusHalfDeltaYaw)}; + float32_t const sinCosTheta{sinTheta * cosTheta}; - u11 = (m_uDeltaDist * cos2Theta) + (m_uDeltaYaw * quarterDeltaDistSquare * sin2Theta); - u12 = (m_uDeltaDist * sinCosTheta) - (m_uDeltaYaw * quarterDeltaDistSquare * sinCosTheta); - u13 = -m_uDeltaYaw * halfDeltaDistSquare * sinTheta; + q11 = m_qX + (deltaDistSquare * sinTheta * sinTheta * m_qTheta); + q12 = -deltaDistSquare * sinCosTheta * m_qTheta; + q13 = -deltaDist * sinTheta * m_qTheta; - u21 = (m_uDeltaDist * sinCosTheta) - (m_uDeltaYaw * quarterDeltaDistSquare * sinCosTheta); - u22 = (m_uDeltaDist * sin2Theta) + (m_uDeltaYaw * quarterDeltaDistSquare * cos2Theta); - u23 = m_uDeltaYaw * halfDeltaDistSquare * cosTheta; + q21 = -deltaDistSquare * sinCosTheta * m_qTheta; + q22 = m_qY + (deltaDistSquare * cosTheta * cosTheta * m_qTheta); + q23 = deltaDist * cosTheta * m_qTheta; - u31 = -m_uDeltaYaw * halfDeltaDistSquare * sinTheta; - u32 = m_uDeltaYaw * halfDeltaDistSquare * cosTheta; - u33 = m_uDeltaYaw; + q31 = -deltaDist * sinTheta * m_qTheta; + q32 = deltaDist * cosTheta * m_qTheta; + q33 = m_qTheta; - return matUk; - } + return matQk; +} - Matrix EgoMotionModel::getJacobianFk(Vector const & vecX, Vector const & vecU) const - { - Matrix matFk; +Matrix EgoMotionModel::getInputNoiseCov( + Vector const& vecX, Vector const& vecU) const +{ + Matrix matUk; + + float32_t& u11 = matUk(0, 0); + float32_t& u12 = matUk(0, 1); + float32_t& u13 = matUk(0, 2); + + float32_t& u21 = matUk(1, 0); + float32_t& u22 = matUk(1, 1); + float32_t& u23 = matUk(1, 2); + + float32_t& u31 = matUk(2, 0); + float32_t& u32 = matUk(2, 1); + float32_t& u33 = matUk(2, 2); + + float32_t const& iYaw{vecX[2]}; + + float32_t const& deltaDist{vecU[0]}; + float32_t const& deltaYaw{vecU[1]}; + + float32_t const deltaDistSquare{deltaDist * deltaDist}; + float32_t const halfDeltaDistSquare{deltaDist / 2.0F}; + float32_t const quarterDeltaDistSquare{deltaDistSquare / 4.0F}; + float32_t const halfDeltaYaw{deltaYaw / 2.0F}; + float32_t const iYawPlusHalfDeltaYaw{iYaw + halfDeltaYaw}; + float32_t const sinTheta{std::sin(iYawPlusHalfDeltaYaw)}; + float32_t const cosTheta{std::cos(iYawPlusHalfDeltaYaw)}; + float32_t const sin2Theta{sinTheta * sinTheta}; + float32_t const cos2Theta{cosTheta * cosTheta}; + float32_t const sinCosTheta{sinTheta * cosTheta}; + + u11 = (m_uDeltaDist * cos2Theta) + + (m_uDeltaYaw * quarterDeltaDistSquare * sin2Theta); + u12 = (m_uDeltaDist * sinCosTheta) - + (m_uDeltaYaw * quarterDeltaDistSquare * sinCosTheta); + u13 = -m_uDeltaYaw * halfDeltaDistSquare * sinTheta; + + u21 = (m_uDeltaDist * sinCosTheta) - + (m_uDeltaYaw * quarterDeltaDistSquare * sinCosTheta); + u22 = (m_uDeltaDist * sin2Theta) + + (m_uDeltaYaw * quarterDeltaDistSquare * cos2Theta); + u23 = m_uDeltaYaw * halfDeltaDistSquare * cosTheta; + + u31 = -m_uDeltaYaw * halfDeltaDistSquare * sinTheta; + u32 = m_uDeltaYaw * halfDeltaDistSquare * cosTheta; + u33 = m_uDeltaYaw; + + return matUk; +} + +Matrix EgoMotionModel::getJacobianFk( + Vector const& vecX, Vector 
const& vecU) const +{ + Matrix matFk; - float32_t & df1dx1 = matFk(0, 0); - float32_t & df1dx2 = matFk(0, 1); - float32_t & df1dx3 = matFk(0, 2); + float32_t& df1dx1 = matFk(0, 0); + float32_t& df1dx2 = matFk(0, 1); + float32_t& df1dx3 = matFk(0, 2); - float32_t & df2dx1 = matFk(1, 0); - float32_t & df2dx2 = matFk(1, 1); - float32_t & df2dx3 = matFk(1, 2); + float32_t& df2dx1 = matFk(1, 0); + float32_t& df2dx2 = matFk(1, 1); + float32_t& df2dx3 = matFk(1, 2); - float32_t & df3dx1 = matFk(2, 0); - float32_t & df3dx2 = matFk(2, 1); - float32_t & df3dx3 = matFk(2, 2); + float32_t& df3dx1 = matFk(2, 0); + float32_t& df3dx2 = matFk(2, 1); + float32_t& df3dx3 = matFk(2, 2); - float32_t const & iYaw { vecX[2] }; + float32_t const& iYaw{vecX[2]}; - float32_t const & deltaDist{ vecU[0] }; - float32_t const & deltaYaw { vecU[1] }; + float32_t const& deltaDist{vecU[0]}; + float32_t const& deltaYaw{vecU[1]}; - float32_t const halfDeltaYaw{ deltaYaw / 2.0F }; - float32_t const iYawPlusHalfDeltaYaw{ iYaw + halfDeltaYaw }; + float32_t const halfDeltaYaw{deltaYaw / 2.0F}; + float32_t const iYawPlusHalfDeltaYaw{iYaw + halfDeltaYaw}; - df1dx1 = 1.0F; - df1dx2 = 0.0F; - df1dx3 = -deltaDist * std::sin(iYawPlusHalfDeltaYaw); + df1dx1 = 1.0F; + df1dx2 = 0.0F; + df1dx3 = -deltaDist * std::sin(iYawPlusHalfDeltaYaw); - df2dx1 = 0.0F; - df2dx2 = 1.0F; - df2dx3 = deltaDist * std::cos(iYawPlusHalfDeltaYaw); + df2dx1 = 0.0F; + df2dx2 = 1.0F; + df2dx3 = deltaDist * std::cos(iYawPlusHalfDeltaYaw); - df3dx1 = 0.0F; - df3dx2 = 0.0F; - df3dx3 = 1.0F; + df3dx1 = 0.0F; + df3dx2 = 0.0F; + df3dx3 = 1.0F; - return matFk; - } + return matFk; +} - Matrix EgoMotionModel::getJacobianBk(Vector const & vecX, Vector const & vecU) const - { - Matrix matBk; +Matrix EgoMotionModel::getJacobianBk( + Vector const& vecX, Vector const& vecU) const +{ + Matrix matBk; - float32_t & df1du1 = matBk(0, 0); - float32_t & df1du2 = matBk(0, 1); + float32_t& df1du1 = matBk(0, 0); + float32_t& df1du2 = matBk(0, 1); - float32_t & df2du1 = matBk(1, 0); - float32_t & df2du2 = matBk(1, 1); + float32_t& df2du1 = matBk(1, 0); + float32_t& df2du2 = matBk(1, 1); - float32_t & df3du1 = matBk(2, 0); - float32_t & df3du2 = matBk(2, 1); + float32_t& df3du1 = matBk(2, 0); + float32_t& df3du2 = matBk(2, 1); - float32_t const & iYaw { vecX[2] }; + float32_t const& iYaw{vecX[2]}; - float32_t const & deltaDist{ vecU[0] }; - float32_t const & deltaYaw { vecU[1] }; + float32_t const& deltaDist{vecU[0]}; + float32_t const& deltaYaw{vecU[1]}; - float32_t const halfDeltaDist{ deltaDist / 2.0F }; - float32_t const halfDeltaYaw { deltaYaw / 2.0F }; + float32_t const halfDeltaDist{deltaDist / 2.0F}; + float32_t const halfDeltaYaw{deltaYaw / 2.0F}; - float32_t const iYawPlusHalfDeltaYaw{ iYaw + halfDeltaYaw }; + float32_t const iYawPlusHalfDeltaYaw{iYaw + halfDeltaYaw}; - df1du1 = std::cos(iYawPlusHalfDeltaYaw); - df1du2 = -halfDeltaDist * std::sin(iYawPlusHalfDeltaYaw); + df1du1 = std::cos(iYawPlusHalfDeltaYaw); + df1du2 = -halfDeltaDist * std::sin(iYawPlusHalfDeltaYaw); - df2du1 = std::sin(iYawPlusHalfDeltaYaw); - df2du2 = halfDeltaDist * std::cos(iYawPlusHalfDeltaYaw); + df2du1 = std::sin(iYawPlusHalfDeltaYaw); + df2du2 = halfDeltaDist * std::cos(iYawPlusHalfDeltaYaw); - df3du1 = 0.0F; - df3du2 = 1.0F; + df3du1 = 0.0F; + df3du2 = 1.0F; - return matBk; - } - } -} \ No newline at end of file + return matBk; +} +} // namespace motionmodel +} // namespace kf \ No newline at end of file diff --git a/src/openkf/motion_model/ego_motion_model.h 
b/src/openkf/motion_model/ego_motion_model.h index ec50e1b..5ecf80e 100644 --- a/src/openkf/motion_model/ego_motion_model.h +++ b/src/openkf/motion_model/ego_motion_model.h @@ -1,120 +1,109 @@ #ifndef OPENKF_EGO_MOTION_MODEL_H #define OPENKF_EGO_MOTION_MODEL_H -#include "types.h" #include "motion_model.h" +#include "types.h" namespace kf { - namespace motionmodel - { - - /// @brief State space dimension for ego-motion model \vec{x}=[x, y, yaw]^T - static constexpr int32_t DIM_X{ 3 }; - - /// @brief Input space dimension used by ego-motion model \vec{u}=[delta_distance, delta_yaw]^T - static constexpr int32_t DIM_U{ 2 }; - - /// @brief Derived class from base motion model class with external inputs - /// Motion model for ego vehicle which is used for dead reckoning purposes - /// by utilizing odometry external inputs vehicle displacement and change - /// in heading angle and incrementing them to obtain the new state. - class EgoMotionModel : public MotionModelExtInput - { - public: - - EgoMotionModel() - : m_qX(0.0F) - , m_qY(0.0F) - , m_qTheta(0.0F) - , m_uDeltaDist(0.0F) - , m_uDeltaYaw(0.0F) - {} - - /// @brief Prediction motion model function that propagate the previous state to next state in time. - /// @param vecX State space vector \vec{x} - /// @param vecU Input space vector \vec{u} - /// @param vecQ State white gaussian noise vector \vec{q} - /// @return Predicted/ propagated state space vector - virtual Vector f(Vector const & vecX, Vector const & vecU, Vector const & vecQ = Vector::Zero()) const override; - - /// @brief Get the process noise covariance Q - /// @param vecX State space vector \vec{x} - /// @param vecU Input space vector \vec{u} - /// @return The process noise covariance Q - virtual Matrix getProcessNoiseCov(Vector const & vecX, Vector const & vecU) const override; - - /// @brief Get the input noise covariance U - /// @param vecX State space vector \vec{x} - /// @param vecU Input space vector \vec{u} - /// @return The input noise covariance U - virtual Matrix getInputNoiseCov(Vector const & vecX, Vector const & vecU) const override; - - /// @brief Method that calculates the jacobians of the state transition model. - /// @param vecX State Space vector \vec{x} - /// @param vecU Input Space vector \vec{u} - /// @return The jacobians of the state transition model. - virtual Matrix getJacobianFk(Vector const & vecX, Vector const & vecU) const override; - - /// @brief Method that calculates the jacobians of the input transition model. - /// @param vecX State Space vector \vec{x} - /// @param vecU Input Space vector \vec{u} - /// @return The jacobians of the input transition model. - virtual Matrix getJacobianBk(Vector const & vecX, Vector const & vecU) const override; - - /// @brief Setter for noise variance of pose-x state. - /// @param val variance value - void setNoiseX(float32_t const val) - { - m_qX = val; - } - - /// @brief Setter for noise variance of pose-y state. - /// @param val variance value - void setNoiseY(float32_t const val) - { - m_qY = val; - } - - /// @brief Setter for noise variance of pose-yaw state. - /// @param val variance value - void setNoiseTheta(float32_t const val) - { - m_qTheta = val; - } - - /// @brief Setter for noise variance of delta distance input. 
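// For reference, the closed-form Jacobians implemented in
// ego_motion_model.cpp above, written with theta_m = yaw + delta_yaw / 2
// (a direct restatement of getJacobianFk and getJacobianBk):
//
//        | 1  0  -delta_dist * sin(theta_m) |         | cos(theta_m)  -(delta_dist / 2) * sin(theta_m) |
//   Fk = | 0  1   delta_dist * cos(theta_m) |    Bk = | sin(theta_m)   (delta_dist / 2) * cos(theta_m) |
//        | 0  0   1                         |         | 0               1                              |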
- /// @param val variance value - void setNoiseDeltaDist(float32_t const val) - { - m_uDeltaDist = val; - } - - /// @brief Setter for noise variance of delta yaw input - /// @param val variance value - void setNoiseDeltaYaw(float32_t const val) - { - m_uDeltaYaw = val; - } - - private: - - /// @brief The noise variance of pose-x state. - float32_t m_qX; - - /// @brief The noise variance of pose-y state. - float32_t m_qY; - - /// @brief The noise variance of pose-yaw state. - float32_t m_qTheta; - - /// @brief The noise variance of delta distance input. - float32_t m_uDeltaDist; - - /// @brief The noise variance of delta yaw input. - float32_t m_uDeltaYaw; - }; - } -} - -#endif // OPENKF_EGO_MOTION_MODEL_H \ No newline at end of file +namespace motionmodel +{ + +/// @brief State space dimension for ego-motion model \vec{x}=[x, y, yaw]^T +static constexpr int32_t DIM_X{3}; + +/// @brief Input space dimension used by ego-motion model +/// \vec{u}=[delta_distance, delta_yaw]^T +static constexpr int32_t DIM_U{2}; + +/// @brief Derived class from base motion model class with external inputs +/// Motion model for ego vehicle which is used for dead reckoning purposes +/// by utilizing odometry external inputs vehicle displacement and change +/// in heading angle and incrementing them to obtain the new state. +class EgoMotionModel : public MotionModelExtInput +{ + public: + EgoMotionModel() + : m_qX(0.0F), m_qY(0.0F), m_qTheta(0.0F), m_uDeltaDist(0.0F), + m_uDeltaYaw(0.0F) + { + } + + /// @brief Prediction motion model function that propagate the previous state + /// to next state in time. + /// @param vecX State space vector \vec{x} + /// @param vecU Input space vector \vec{u} + /// @param vecQ State white gaussian noise vector \vec{q} + /// @return Predicted/ propagated state space vector + virtual Vector f( + Vector const& vecX, Vector const& vecU, + Vector const& vecQ = Vector::Zero()) const override; + + /// @brief Get the process noise covariance Q + /// @param vecX State space vector \vec{x} + /// @param vecU Input space vector \vec{u} + /// @return The process noise covariance Q + virtual Matrix getProcessNoiseCov( + Vector const& vecX, Vector const& vecU) const override; + + /// @brief Get the input noise covariance U + /// @param vecX State space vector \vec{x} + /// @param vecU Input space vector \vec{u} + /// @return The input noise covariance U + virtual Matrix getInputNoiseCov( + Vector const& vecX, Vector const& vecU) const override; + + /// @brief Method that calculates the jacobians of the state transition model. + /// @param vecX State Space vector \vec{x} + /// @param vecU Input Space vector \vec{u} + /// @return The jacobians of the state transition model. + virtual Matrix getJacobianFk( + Vector const& vecX, Vector const& vecU) const override; + + /// @brief Method that calculates the jacobians of the input transition model. + /// @param vecX State Space vector \vec{x} + /// @param vecU Input Space vector \vec{u} + /// @return The jacobians of the input transition model. + virtual Matrix getJacobianBk( + Vector const& vecX, Vector const& vecU) const override; + + /// @brief Setter for noise variance of pose-x state. + /// @param val variance value + void setNoiseX(float32_t const val) { m_qX = val; } + + /// @brief Setter for noise variance of pose-y state. + /// @param val variance value + void setNoiseY(float32_t const val) { m_qY = val; } + + /// @brief Setter for noise variance of pose-yaw state. 
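// A short usage sketch (variance values purely illustrative, the helper name
// configureMotionModel hypothetical) of wiring up the noise terms through the
// setters declared in this header:
#include "motion_model/ego_motion_model.h"

void configureMotionModel(kf::motionmodel::EgoMotionModel& model)
{
    model.setNoiseX(0.05F);          // pose-x variance [m^2]
    model.setNoiseY(0.05F);          // pose-y variance [m^2]
    model.setNoiseTheta(0.01F);      // pose-yaw variance [rad^2]
    model.setNoiseDeltaDist(0.02F);  // odometry distance-increment variance
    model.setNoiseDeltaYaw(0.005F);  // odometry yaw-increment variance
}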
+ /// @param val variance value + void setNoiseTheta(float32_t const val) { m_qTheta = val; } + + /// @brief Setter for noise variance of delta distance input. + /// @param val variance value + void setNoiseDeltaDist(float32_t const val) { m_uDeltaDist = val; } + + /// @brief Setter for noise variance of delta yaw input + /// @param val variance value + void setNoiseDeltaYaw(float32_t const val) { m_uDeltaYaw = val; } + + private: + /// @brief The noise variance of pose-x state. + float32_t m_qX; + + /// @brief The noise variance of pose-y state. + float32_t m_qY; + + /// @brief The noise variance of pose-yaw state. + float32_t m_qTheta; + + /// @brief The noise variance of delta distance input. + float32_t m_uDeltaDist; + + /// @brief The noise variance of delta yaw input. + float32_t m_uDeltaYaw; +}; +} // namespace motionmodel +} // namespace kf + +#endif // OPENKF_EGO_MOTION_MODEL_H \ No newline at end of file diff --git a/src/openkf/motion_model/motion_model.h b/src/openkf/motion_model/motion_model.h index 30544c8..cddbc4d 100644 --- a/src/openkf/motion_model/motion_model.h +++ b/src/openkf/motion_model/motion_model.h @@ -5,72 +5,83 @@ namespace kf { - namespace motionmodel - { - /// @brief Base class for motion models used by kalman filters - /// @tparam DIM_X State space vector dimension - template - class MotionModel - { - public: - - /// @brief Prediction motion model function that propagate the previous state to next state in time. - /// @param vecX State space vector \vec{x} - /// @param vecQ State white gaussian noise vector \vec{q} - /// @return Predicted/ propagated state space vector - virtual Vector f(Vector const & vecX, Vector const & vecQ = Vector::Zero()) const = 0; - - /// @brief Get the process noise covariance Q - /// @param vecX State space vector \vec{x} - /// @return The process noise covariance Q - virtual Matrix getProcessNoiseCov(Vector const & vecX) const = 0; +namespace motionmodel +{ +/// @brief Base class for motion models used by kalman filters +/// @tparam DIM_X State space vector dimension +template +class MotionModel +{ + public: + /// @brief Prediction motion model function that propagate the previous state + /// to next state in time. + /// @param vecX State space vector \vec{x} + /// @param vecQ State white gaussian noise vector \vec{q} + /// @return Predicted/ propagated state space vector + virtual Vector f( + Vector const& vecX, + Vector const& vecQ = Vector::Zero()) const = 0; - /// @brief Method that calculates the jacobians of the state transition model. - /// @param vecX State Space vector \vec{x} - /// @return The jacobians of the state transition model. - virtual Matrix getJacobianFk(Vector const & vecX) const = 0; - }; + /// @brief Get the process noise covariance Q + /// @param vecX State space vector \vec{x} + /// @return The process noise covariance Q + virtual Matrix getProcessNoiseCov( + Vector const& vecX) const = 0; - /// @brief Base class for motion models with external inputs used by kalman filters - /// @tparam DIM_X State space vector dimension - /// @tparam DIM_U Input space vector dimension - template - class MotionModelExtInput - { - public: + /// @brief Method that calculates the jacobians of the state transition model. + /// @param vecX State Space vector \vec{x} + /// @return The jacobians of the state transition model. + virtual Matrix getJacobianFk( + Vector const& vecX) const = 0; +}; - /// @brief Prediction motion model function that propagate the previous state to next state in time. 
- /// @param vecX State space vector \vec{x} - /// @param vecU Input space vector \vec{u} - /// @param vecQ State white gaussian noise vector \vec{q} - /// @return Predicted/ propagated state space vector - virtual Vector f(Vector const & vecX, Vector const & vecU, Vector const & vecQ = Vector::Zero()) const = 0; +/// @brief Base class for motion models with external inputs used by kalman +/// filters +/// @tparam DIM_X State space vector dimension +/// @tparam DIM_U Input space vector dimension +template +class MotionModelExtInput +{ + public: + /// @brief Prediction motion model function that propagate the previous state + /// to next state in time. + /// @param vecX State space vector \vec{x} + /// @param vecU Input space vector \vec{u} + /// @param vecQ State white gaussian noise vector \vec{q} + /// @return Predicted/ propagated state space vector + virtual Vector f( + Vector const& vecX, Vector const& vecU, + Vector const& vecQ = Vector::Zero()) const = 0; - /// @brief Get the process noise covariance Q - /// @param vecX State space vector \vec{x} - /// @param vecU Input space vector \vec{u} - /// @return The process noise covariance Q - virtual Matrix getProcessNoiseCov(Vector const & vecX, Vector const & vecU) const = 0; + /// @brief Get the process noise covariance Q + /// @param vecX State space vector \vec{x} + /// @param vecU Input space vector \vec{u} + /// @return The process noise covariance Q + virtual Matrix getProcessNoiseCov( + Vector const& vecX, Vector const& vecU) const = 0; - /// @brief Get the input noise covariance U - /// @param vecX State space vector \vec{x} - /// @param vecU Input space vector \vec{u} - /// @return The input noise covariance U - virtual Matrix getInputNoiseCov(Vector const & vecX, Vector const & vecU) const = 0; + /// @brief Get the input noise covariance U + /// @param vecX State space vector \vec{x} + /// @param vecU Input space vector \vec{u} + /// @return The input noise covariance U + virtual Matrix getInputNoiseCov( + Vector const& vecX, Vector const& vecU) const = 0; - /// @brief Method that calculates the jacobians of the state transition model. - /// @param vecX State Space vector \vec{x} - /// @param vecU Input Space vector \vec{u} - /// @return The jacobians of the state transition model. - virtual Matrix getJacobianFk(Vector const & vecX, Vector const & vecU) const = 0; + /// @brief Method that calculates the jacobians of the state transition model. + /// @param vecX State Space vector \vec{x} + /// @param vecU Input Space vector \vec{u} + /// @return The jacobians of the state transition model. + virtual Matrix getJacobianFk( + Vector const& vecX, Vector const& vecU) const = 0; - /// @brief Method that calculates the jacobians of the input transition model. - /// @param vecX State Space vector \vec{x} - /// @param vecU Input Space vector \vec{u} - /// @return The jacobians of the input transition model. - virtual Matrix getJacobianBk(Vector const & vecX, Vector const & vecU) const = 0; - }; - } -} + /// @brief Method that calculates the jacobians of the input transition model. + /// @param vecX State Space vector \vec{x} + /// @param vecU Input Space vector \vec{u} + /// @return The jacobians of the input transition model. 
+ virtual Matrix getJacobianBk( + Vector const& vecX, Vector const& vecU) const = 0; +}; +} // namespace motionmodel +} // namespace kf -#endif // OPENKF_MOTION_MODEL_H +#endif // OPENKF_MOTION_MODEL_H diff --git a/src/openkf/types.h b/src/openkf/types.h index 28893ca..18a84e7 100644 --- a/src/openkf/types.h +++ b/src/openkf/types.h @@ -12,18 +12,18 @@ #ifndef OPENKF_TYPES_H #define OPENKF_TYPES_H -#include #include +#include namespace kf { - using float32_t = float; +using float32_t = float; - template - using Matrix = Eigen::Matrix; +template +using Matrix = Eigen::Matrix; - template - using Vector = Eigen::Matrix; -} +template +using Vector = Eigen::Matrix; +} // namespace kf -#endif // OPENKF_TYPES_H +#endif // OPENKF_TYPES_H diff --git a/src/openkf/util.h b/src/openkf/util.h index 890862a..d66404a 100644 --- a/src/openkf/util.h +++ b/src/openkf/util.h @@ -16,218 +16,228 @@ namespace kf { - namespace util +namespace util +{ +template +void copyToColumn(const int32_t colIdx, Matrix& lhsSigmaX, + const Vector& rhsVecX) +{ + for (int32_t i{0}; i < ROWS; ++i) + { // rows + lhsSigmaX(i, colIdx) = rhsVecX[i]; + } +} + +template +void addColumnFrom(const int32_t colIdx, Matrix& lhsSigmaX, + const Vector& rhsVecX) +{ + for (int32_t i{0}; i < ROWS; ++i) + { // rows + lhsSigmaX(i, colIdx) += rhsVecX[i]; + } +} + +template +void subColumnFrom(const int32_t colIdx, Matrix& lhsSigmaX, + const Vector& rhsVecX) +{ + for (int32_t i{0}; i < ROWS; ++i) + { // rows + lhsSigmaX(i, colIdx) -= rhsVecX[i]; + } +} + +template +Vector getColumnAt(const int32_t colIdx, const Matrix& matX) +{ + assert(colIdx < COLS); // assert if colIdx is out of boundary + + Vector vecXi; + + for (int32_t i{0}; i < ROWS; ++i) + { // rows + vecXi[i] = matX(i, colIdx); + } + + return vecXi; +} + +template +Matrix getBlock(const Matrix& matA, + int32_t startRowIdx, int32_t startColIdx) +{ + Matrix matB; + + for (int32_t i = startRowIdx; i < startRowIdx + N_ROWS; ++i) + { + for (int32_t j = startColIdx; j < startColIdx + N_COLS; ++j) + { + matB(i - startRowIdx, j - startColIdx) = matA(i, j); + } + } + + return matB; +} + +template +Matrix getUpperTriangulerView(const Matrix& matA) +{ + Matrix matB; + for (int32_t i = 0; i < ROWS; ++i) + { + for (int32_t j = i; j < COLS; ++j) { - template - void copyToColumn(const int32_t colIdx, Matrix& lhsSigmaX, const Vector& rhsVecX) - { - for (int32_t i{ 0 }; i < ROWS; ++i) - { // rows - lhsSigmaX(i, colIdx) = rhsVecX[i]; - } - } - - template - void addColumnFrom(const int32_t colIdx, Matrix& lhsSigmaX, const Vector& rhsVecX) - { - for (int32_t i{ 0 }; i < ROWS; ++i) - { // rows - lhsSigmaX(i, colIdx) += rhsVecX[i]; - } - } - - template - void subColumnFrom(const int32_t colIdx, Matrix& lhsSigmaX, const Vector& rhsVecX) - { - for (int32_t i{ 0 }; i < ROWS; ++i) - { // rows - lhsSigmaX(i, colIdx) -= rhsVecX[i]; - } - } - - template - Vector getColumnAt(const int32_t colIdx, const Matrix& matX) - { - assert(colIdx < COLS); // assert if colIdx is out of boundary - - Vector vecXi; - - for (int32_t i{ 0 }; i < ROWS; ++i) - { // rows - vecXi[i] = matX(i, colIdx); - } - - return vecXi; - } - - template - Matrix getBlock(const Matrix& matA, int32_t startRowIdx, int32_t startColIdx) - { - Matrix matB; - - for (int32_t i = startRowIdx; i < startRowIdx + N_ROWS; ++i) - { - for (int32_t j = startColIdx; j < startColIdx + N_COLS; ++j) - { - matB(i - startRowIdx, j - startColIdx) = matA(i, j); - } - } - - return matB; - } - - template - Matrix getUpperTriangulerView(const Matrix& matA) - { - Matrix matB; - 
for (int32_t i = 0; i < ROWS; ++i) - { - for (int32_t j = i; j < COLS; ++j) - { - matB(i, j) = matA(i, j); - if (i != j) - { - matB(j, i) = 0.0F; - } - } - } - return matB; - } - - template - Matrix cholupdate(Matrix matL, Matrix matW, float32_t alpha) - { - Matrix matLo; - - for (int32_t i{ 0 }; i < COLS_W; ++i) - { - matLo = matL; - - float32_t b{ 1.0F }; - - for (int32_t j{ 0 }; j < ROWS; ++j) - { - const float32_t ljjPow2{ matL(j, j) * matL(j, j) }; - const float32_t wjiPow2{ matW(j, i) * matW(j, i) }; - const float32_t upsilon{ (ljjPow2 * b) + (alpha * wjiPow2) }; - - matLo(j, j) = std::sqrt(ljjPow2 + ((alpha / b) * wjiPow2)); - - for (int32_t k{ j + 1 }; k < ROWS; ++k) - { - matW(k, i) -= (matW(j, i) / matL(j, j)) * matL(k, j); - matLo(k, j) = ((matLo(j, j) / matL(j, j)) * matL(k, j)) + (matLo(j, j) * alpha * matW(j, i) * matW(k, i) / upsilon); - } - - b += alpha * (wjiPow2 / ljjPow2); - } - - matL = matLo; - } - return matLo; - } - - template - Matrix forwardSubstitute(const Matrix& matA, const Matrix& matB) - { - Matrix matX; - - for (int32_t k{ 0 }; k < COLS; ++k) - { - for (int32_t i{ 0 }; i < ROWS; ++i) - { - float32_t accumulation{ matB(i, k) }; - for (int32_t j{ 0 }; j < i; ++j) - { - accumulation -= matA(i, j) * matX(j, k); - } - - matX(i, k) = accumulation / matA(i, i); - } - } - return matX; - } - - template - Matrix backwardSubstitute(const Matrix& matA, const Matrix& matB) - { - Matrix matX; - - for (int32_t k{ 0 }; k < COLS; ++k) - { - for (int32_t i{ ROWS - 1 }; i >= 0; --i) - { - float32_t accumulation{ matB(i, k) }; - - for (int32_t j{ ROWS - 1 }; j > i; --j) - { - accumulation -= matA(i, j) * matX(j, k); - } - - matX(i, k) = accumulation / matA(i, i); - } - } - return matX; - } - - template - class JointRows - { - public: - JointRows() = delete; - ~JointRows() {} - - explicit JointRows(const Matrix& matM1, const Matrix& matM2) - { - for (int32_t j{ 0 }; j < COLS; ++j) - { - for (int32_t i{ 0 }; i < ROWS1; ++i) - { - m_matJ(i, j) = matM1(i, j); - } - - for (int32_t i{ 0 }; i < ROWS2; ++i) - { - m_matJ(i + ROWS1, j) = matM2(i, j); - } - } - } - - const Matrix& jointMatrix() const { return m_matJ; } - - private: - Matrix m_matJ; - }; - - template - class JointCols - { - public: - JointCols() = delete; - ~JointCols() {} - - explicit JointCols(const Matrix& matM1, const Matrix& matM2) - { - for (int32_t i{ 0 }; i < ROWS; ++i) - { - for (int32_t j{ 0 }; j < COLS1; ++j) - { - m_matJ(i, j) = matM1(i, j); - } - - for (int32_t j{ 0 }; j < COLS2; ++j) - { - m_matJ(i, j + COLS1) = matM2(i, j); - } - } - } - - const Matrix& jointMatrix() const { return m_matJ; } - - private: - Matrix m_matJ; - }; + matB(i, j) = matA(i, j); + if (i != j) + { + matB(j, i) = 0.0F; + } } + } + return matB; } -#endif // KALMAN_FILTER_UTIL_H +template +Matrix cholupdate(Matrix matL, + Matrix matW, float32_t alpha) +{ + Matrix matLo; + + for (int32_t i{0}; i < COLS_W; ++i) + { + matLo = matL; + + float32_t b{1.0F}; + + for (int32_t j{0}; j < ROWS; ++j) + { + const float32_t ljjPow2{matL(j, j) * matL(j, j)}; + const float32_t wjiPow2{matW(j, i) * matW(j, i)}; + const float32_t upsilon{(ljjPow2 * b) + (alpha * wjiPow2)}; + + matLo(j, j) = std::sqrt(ljjPow2 + ((alpha / b) * wjiPow2)); + + for (int32_t k{j + 1}; k < ROWS; ++k) + { + matW(k, i) -= (matW(j, i) / matL(j, j)) * matL(k, j); + matLo(k, j) = ((matLo(j, j) / matL(j, j)) * matL(k, j)) + + (matLo(j, j) * alpha * matW(j, i) * matW(k, i) / upsilon); + } + + b += alpha * (wjiPow2 / ljjPow2); + } + + matL = matLo; + } + return matLo; +} + +template 
+Matrix forwardSubstitute(const Matrix& matA, + const Matrix& matB) +{ + Matrix matX; + + for (int32_t k{0}; k < COLS; ++k) + { + for (int32_t i{0}; i < ROWS; ++i) + { + float32_t accumulation{matB(i, k)}; + for (int32_t j{0}; j < i; ++j) + { + accumulation -= matA(i, j) * matX(j, k); + } + + matX(i, k) = accumulation / matA(i, i); + } + } + return matX; +} + +template +Matrix backwardSubstitute(const Matrix& matA, + const Matrix& matB) +{ + Matrix matX; + + for (int32_t k{0}; k < COLS; ++k) + { + for (int32_t i{ROWS - 1}; i >= 0; --i) + { + float32_t accumulation{matB(i, k)}; + + for (int32_t j{ROWS - 1}; j > i; --j) + { + accumulation -= matA(i, j) * matX(j, k); + } + + matX(i, k) = accumulation / matA(i, i); + } + } + return matX; +} + +template +class JointRows +{ + public: + JointRows() = delete; + ~JointRows() {} + + explicit JointRows(const Matrix& matM1, + const Matrix& matM2) + { + for (int32_t j{0}; j < COLS; ++j) + { + for (int32_t i{0}; i < ROWS1; ++i) + { + m_matJ(i, j) = matM1(i, j); + } + + for (int32_t i{0}; i < ROWS2; ++i) + { + m_matJ(i + ROWS1, j) = matM2(i, j); + } + } + } + + const Matrix& jointMatrix() const { return m_matJ; } + + private: + Matrix m_matJ; +}; + +template +class JointCols +{ + public: + JointCols() = delete; + ~JointCols() {} + + explicit JointCols(const Matrix& matM1, + const Matrix& matM2) + { + for (int32_t i{0}; i < ROWS; ++i) + { + for (int32_t j{0}; j < COLS1; ++j) + { + m_matJ(i, j) = matM1(i, j); + } + + for (int32_t j{0}; j < COLS2; ++j) + { + m_matJ(i, j + COLS1) = matM2(i, j); + } + } + } + + const Matrix& jointMatrix() const { return m_matJ; } + + private: + Matrix m_matJ; +}; +} // namespace util +} // namespace kf + +#endif // KALMAN_FILTER_UTIL_H diff --git a/tests/unit/kalman_filter/kalman_filter_test.cpp b/tests/unit/kalman_filter/kalman_filter_test.cpp index c8f93cb..6969879 100644 --- a/tests/unit/kalman_filter/kalman_filter_test.cpp +++ b/tests/unit/kalman_filter/kalman_filter_test.cpp @@ -1,115 +1,119 @@ -#include #include "kalman_filter/kalman_filter.h" +#include class KalmanFilterTest : public testing::Test { -public: - virtual void SetUp() override {} - virtual void TearDown() override {} - - static constexpr float FLOAT_EPSILON{ 0.00001F }; - - static constexpr int32_t DIM_X_LKF{ 2 }; - static constexpr int32_t DIM_Z_LKF{ 1 }; - static constexpr int32_t DIM_X_EKF{ 2 }; - static constexpr int32_t DIM_Z_EKF{ 2 }; - static constexpr kf::float32_t Q11{ 0.1F }, Q22{ 0.1F }; - - kf::KalmanFilter m_lkf; - kf::KalmanFilter m_ekf; - - static kf::Vector covertCartesian2Polar(const kf::Vector& cartesian) - { - kf::Vector polar; - polar[0] = std::sqrt(cartesian[0] * cartesian[0] + cartesian[1] * cartesian[1]); - polar[1] = std::atan2(cartesian[1], cartesian[0]); - return polar; - } - - static kf::Matrix calculateJacobianMatrix(const kf::Vector& vecX) - { - const kf::float32_t valX2PlusY2{ (vecX[0] * vecX[0]) + (vecX[1] * vecX[1]) }; - const kf::float32_t valSqrtX2PlusY2{ std::sqrt(valX2PlusY2) }; - - kf::Matrix matHj; - matHj << - (vecX[0] / valSqrtX2PlusY2), (vecX[1] / valSqrtX2PlusY2), - (-vecX[1] / valX2PlusY2), (vecX[0] / valX2PlusY2); - - return matHj; - } + public: + virtual void SetUp() override {} + virtual void TearDown() override {} + + static constexpr float FLOAT_EPSILON{0.00001F}; + + static constexpr int32_t DIM_X_LKF{2}; + static constexpr int32_t DIM_Z_LKF{1}; + static constexpr int32_t DIM_X_EKF{2}; + static constexpr int32_t DIM_Z_EKF{2}; + static constexpr kf::float32_t Q11{0.1F}, Q22{0.1F}; + + kf::KalmanFilter m_lkf; 
+ kf::KalmanFilter m_ekf; + + static kf::Vector covertCartesian2Polar( + const kf::Vector& cartesian) + { + kf::Vector polar; + polar[0] = + std::sqrt(cartesian[0] * cartesian[0] + cartesian[1] * cartesian[1]); + polar[1] = std::atan2(cartesian[1], cartesian[0]); + return polar; + } + + static kf::Matrix calculateJacobianMatrix( + const kf::Vector& vecX) + { + const kf::float32_t valX2PlusY2{(vecX[0] * vecX[0]) + (vecX[1] * vecX[1])}; + const kf::float32_t valSqrtX2PlusY2{std::sqrt(valX2PlusY2)}; + + kf::Matrix matHj; + matHj << (vecX[0] / valSqrtX2PlusY2), (vecX[1] / valSqrtX2PlusY2), + (-vecX[1] / valX2PlusY2), (vecX[0] / valX2PlusY2); + + return matHj; + } }; TEST_F(KalmanFilterTest, test_predictLKF) { - static constexpr float SAMPLE_TIME_SEC{ 1.0F }; + static constexpr float SAMPLE_TIME_SEC{1.0F}; - m_lkf.vecX() << 0.0F, 2.0F; - m_lkf.matP() << 0.1F, 0.0F, 0.0F, 0.1F; + m_lkf.vecX() << 0.0F, 2.0F; + m_lkf.matP() << 0.1F, 0.0F, 0.0F, 0.1F; - kf::Matrix F; // state transition matrix - F << 1.0F, SAMPLE_TIME_SEC, 0.0F, 1.0F; + kf::Matrix F; // state transition matrix + F << 1.0F, SAMPLE_TIME_SEC, 0.0F, 1.0F; - kf::Matrix Q; // process noise covariance - Q(0, 0) = (Q11 * SAMPLE_TIME_SEC) + (Q22 * (std::pow(SAMPLE_TIME_SEC, 3.0F) / 3.0F)); - Q(0, 1) = Q(1, 0) = Q22 * (std::pow(SAMPLE_TIME_SEC, 2.0F) / 2.0F); - Q(1, 1) = Q22 * SAMPLE_TIME_SEC; + kf::Matrix Q; // process noise covariance + Q(0, 0) = (Q11 * SAMPLE_TIME_SEC) + + (Q22 * (std::pow(SAMPLE_TIME_SEC, 3.0F) / 3.0F)); + Q(0, 1) = Q(1, 0) = Q22 * (std::pow(SAMPLE_TIME_SEC, 2.0F) / 2.0F); + Q(1, 1) = Q22 * SAMPLE_TIME_SEC; - m_lkf.predictLKF(F, Q); // execute prediction step + m_lkf.predictLKF(F, Q); // execute prediction step - ASSERT_NEAR(m_lkf.vecX()(0), 2.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.vecX()(1), 2.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.vecX()(0), 2.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.vecX()(1), 2.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.matP()(0), 0.33333F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.matP()(1), 0.15F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.matP()(2), 0.15F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.matP()(3), 0.2F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.matP()(0), 0.33333F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.matP()(1), 0.15F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.matP()(2), 0.15F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.matP()(3), 0.2F, FLOAT_EPSILON); } TEST_F(KalmanFilterTest, test_correctLKF) { - m_lkf.vecX() << 2.0F, 2.0F; - m_lkf.matP() << 0.33333F, 0.15F, 0.15F, 0.2F; + m_lkf.vecX() << 2.0F, 2.0F; + m_lkf.matP() << 0.33333F, 0.15F, 0.15F, 0.2F; - kf::Vector vecZ; - vecZ << 2.25F; + kf::Vector vecZ; + vecZ << 2.25F; - kf::Matrix matR; - matR << 0.01F; + kf::Matrix matR; + matR << 0.01F; - kf::Matrix matH; - matH << 1.0F, 0.0F; + kf::Matrix matH; + matH << 1.0F, 0.0F; - m_lkf.correctLKF(vecZ, matR, matH); + m_lkf.correctLKF(vecZ, matR, matH); - ASSERT_NEAR(m_lkf.vecX()(0), 2.24272F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.vecX()(1), 2.10922F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.vecX()(0), 2.24272F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.vecX()(1), 2.10922F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.matP()(0), 0.00970874F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.matP()(1), 0.00436893F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.matP()(2), 0.00436893F, FLOAT_EPSILON); - ASSERT_NEAR(m_lkf.matP()(3), 0.134466F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.matP()(0), 0.00970874F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.matP()(1), 0.00436893F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.matP()(2), 0.00436893F, FLOAT_EPSILON); + ASSERT_NEAR(m_lkf.matP()(3), 0.134466F, 
FLOAT_EPSILON); } TEST_F(KalmanFilterTest, test_correctEKF) { - m_ekf.vecX() << 10.0F, 5.0F; - m_ekf.matP() << 0.3F, 0.0F, 0.0F, 0.3F; + m_ekf.vecX() << 10.0F, 5.0F; + m_ekf.matP() << 0.3F, 0.0F, 0.0F, 0.3F; - const kf::Vector measPosCart{ 10.4F, 5.2F }; - const kf::Vector vecZ{ covertCartesian2Polar(measPosCart) }; + const kf::Vector measPosCart{10.4F, 5.2F}; + const kf::Vector vecZ{covertCartesian2Polar(measPosCart)}; - kf::Matrix matR; - matR << 0.1F, 0.0F, 0.0F, 0.0008F; + kf::Matrix matR; + matR << 0.1F, 0.0F, 0.0F, 0.0008F; - kf::Matrix matHj{ calculateJacobianMatrix(m_ekf.vecX()) }; // jacobian matrix Hj + kf::Matrix matHj{ + calculateJacobianMatrix(m_ekf.vecX())}; // jacobian matrix Hj - m_ekf.correctEkf(covertCartesian2Polar, vecZ, matR, matHj); + m_ekf.correctEkf(covertCartesian2Polar, vecZ, matR, matHj); - ASSERT_NEAR(m_ekf.vecX()(0), 10.3F, FLOAT_EPSILON); - ASSERT_NEAR(m_ekf.vecX()(1), 5.15F, FLOAT_EPSILON); + ASSERT_NEAR(m_ekf.vecX()(0), 10.3F, FLOAT_EPSILON); + ASSERT_NEAR(m_ekf.vecX()(1), 5.15F, FLOAT_EPSILON); - ASSERT_NEAR(m_ekf.matP()(0), 0.075F, FLOAT_EPSILON); - ASSERT_NEAR(m_ekf.matP()(1), -1.78814e-08F, FLOAT_EPSILON); - ASSERT_NEAR(m_ekf.matP()(2), -1.78814e-08F, FLOAT_EPSILON); - ASSERT_NEAR(m_ekf.matP()(3), 0.075F, FLOAT_EPSILON); + ASSERT_NEAR(m_ekf.matP()(0), 0.075F, FLOAT_EPSILON); + ASSERT_NEAR(m_ekf.matP()(1), -1.78814e-08F, FLOAT_EPSILON); + ASSERT_NEAR(m_ekf.matP()(2), -1.78814e-08F, FLOAT_EPSILON); + ASSERT_NEAR(m_ekf.matP()(3), 0.075F, FLOAT_EPSILON); } diff --git a/tests/unit/kalman_filter/square_root_ukf_test.cpp b/tests/unit/kalman_filter/square_root_ukf_test.cpp index 3908901..a69cd2a 100644 --- a/tests/unit/kalman_filter/square_root_ukf_test.cpp +++ b/tests/unit/kalman_filter/square_root_ukf_test.cpp @@ -1,90 +1,84 @@ -#include "gtest/gtest.h" #include "kalman_filter/square_root_ukf.h" +#include "gtest/gtest.h" class SquareRootUnscentedKalmanFilterTest : public testing::Test { -public: - virtual void SetUp() override {} - virtual void TearDown() override {} + public: + virtual void SetUp() override {} + virtual void TearDown() override {} - static constexpr float FLOAT_EPSILON{ 0.001F }; + static constexpr float FLOAT_EPSILON{0.001F}; - static constexpr size_t DIM_X{ 2 }; - static constexpr size_t DIM_Z{ 2 }; + static constexpr size_t DIM_X{2}; + static constexpr size_t DIM_Z{2}; - kf::SquareRootUKF m_srUkf; + kf::SquareRootUKF m_srUkf; - static kf::Vector funcF(const kf::Vector& x) - { - return x; - } + static kf::Vector funcF(const kf::Vector& x) { return x; } }; TEST_F(SquareRootUnscentedKalmanFilterTest, test_SRUKF_Example01) { - // initializations - //x0 = np.array([1.0, 2.0]) - //P0 = np.array([[1.0, 0.5], [0.5, 1.0]]) - //Q = np.array([[0.5, 0.0], [0.0, 0.5]]) + // initializations + // x0 = np.array([1.0, 2.0]) + // P0 = np.array([[1.0, 0.5], [0.5, 1.0]]) + // Q = np.array([[0.5, 0.0], [0.0, 0.5]]) - //z = np.array([1.2, 1.8]) - //R = np.array([[0.3, 0.0], [0.0, 0.3]]) + // z = np.array([1.2, 1.8]) + // R = np.array([[0.3, 0.0], [0.0, 0.3]]) - kf::Vector x; - x << 1.0F, 2.0F; + kf::Vector x; + x << 1.0F, 2.0F; - kf::Matrix P; - P << 1.0F, 0.5F, - 0.5F, 1.0F; + kf::Matrix P; + P << 1.0F, 0.5F, 0.5F, 1.0F; - kf::Matrix Q; - Q << 0.5F, 0.0F, - 0.0F, 0.5F; + kf::Matrix Q; + Q << 0.5F, 0.0F, 0.0F, 0.5F; - kf::Vector z; - z << 1.2F, 1.8F; + kf::Vector z; + z << 1.2F, 1.8F; - kf::Matrix R; - R << 0.3F, 0.0F, - 0.0F, 0.3F; + kf::Matrix R; + R << 0.3F, 0.0F, 0.0F, 0.3F; - m_srUkf.initialize(x, P, Q, R); + m_srUkf.initialize(x, P, Q, R); - 
m_srUkf.predictSRUKF(funcF); + m_srUkf.predictSRUKF(funcF); - // Expectation from the python results: - // ===================================== - //x1 = - // [1. 2.] - //P1 = - // [[1.5 0.5] - // [0.5 1.5]] + // Expectation from the python results: + // ===================================== + // x1 = + // [1. 2.] + // P1 = + // [[1.5 0.5] + // [0.5 1.5]] - ASSERT_NEAR(m_srUkf.vecX()[0], 1.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.vecX()[1], 2.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.vecX()[0], 1.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.vecX()[1], 2.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.matP()(0, 0), 1.5F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.matP()(0, 1), 0.5F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.matP()(0, 0), 1.5F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.matP()(0, 1), 0.5F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.matP()(1, 0), 0.5F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.matP()(1, 1), 1.5F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.matP()(1, 0), 0.5F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.matP()(1, 1), 1.5F, FLOAT_EPSILON); - m_srUkf.correctSRUKF(funcF, z); + m_srUkf.correctSRUKF(funcF, z); - // Expectations from the python results: - // ====================================== - // x = - // [1.15385 1.84615] - // P = - // [[ 0.24582 0.01505 ] - // [ 0.01505 0.24582 ]] + // Expectations from the python results: + // ====================================== + // x = + // [1.15385 1.84615] + // P = + // [[ 0.24582 0.01505 ] + // [ 0.01505 0.24582 ]] - ASSERT_NEAR(m_srUkf.vecX()[0], 1.15385F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.vecX()[1], 1.84615F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.vecX()[0], 1.15385F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.vecX()[1], 1.84615F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.matP()(0, 0), 0.24582F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.matP()(0, 1), 0.01505F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.matP()(0, 0), 0.24582F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.matP()(0, 1), 0.01505F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.matP()(1, 0), 0.01505F, FLOAT_EPSILON); - ASSERT_NEAR(m_srUkf.matP()(1, 1), 0.24582F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.matP()(1, 0), 0.01505F, FLOAT_EPSILON); + ASSERT_NEAR(m_srUkf.matP()(1, 1), 0.24582F, FLOAT_EPSILON); } \ No newline at end of file diff --git a/tests/unit/kalman_filter/unscented_kalman_filter_test.cpp b/tests/unit/kalman_filter/unscented_kalman_filter_test.cpp index 5bf27b3..efc1fe4 100644 --- a/tests/unit/kalman_filter/unscented_kalman_filter_test.cpp +++ b/tests/unit/kalman_filter/unscented_kalman_filter_test.cpp @@ -1,144 +1,142 @@ -#include "gtest/gtest.h" #include "kalman_filter/unscented_kalman_filter.h" +#include "gtest/gtest.h" class UnscentedKalmanFilterTest : public testing::Test { -public: - virtual void SetUp() override {} - virtual void TearDown() override {} - - static constexpr float FLOAT_EPSILON{ 0.001F }; - - static constexpr size_t DIM_X{ 4 }; - static constexpr size_t DIM_V{ 4 }; - static constexpr size_t DIM_Z{ 2 }; - static constexpr size_t DIM_N{ 2 }; - - kf::UnscentedKalmanFilter m_ukf; - - static kf::Vector funcF(const kf::Vector& x, const kf::Vector& v) - { - kf::Vector y; - y[0] = x[0] + x[2] + v[0]; - y[1] = x[1] + x[3] + v[1]; - y[2] = x[2] + v[2]; - y[3] = x[3] + v[3]; - return y; - } - - static kf::Vector funcH(const kf::Vector& x, const kf::Vector& n) - { - kf::Vector y; - - kf::float32_t px{ x[0] + n[0] }; - kf::float32_t py{ x[1] + n[1] }; - - y[0] = std::sqrt((px * px) + (py * py)); - y[1] = std::atan(py / (px + std::numeric_limits::epsilon())); - return y; - } + public: + virtual void SetUp() 
override {} + virtual void TearDown() override {} + + static constexpr float FLOAT_EPSILON{0.001F}; + + static constexpr size_t DIM_X{4}; + static constexpr size_t DIM_V{4}; + static constexpr size_t DIM_Z{2}; + static constexpr size_t DIM_N{2}; + + kf::UnscentedKalmanFilter m_ukf; + + static kf::Vector funcF(const kf::Vector& x, + const kf::Vector& v) + { + kf::Vector y; + y[0] = x[0] + x[2] + v[0]; + y[1] = x[1] + x[3] + v[1]; + y[2] = x[2] + v[2]; + y[3] = x[3] + v[3]; + return y; + } + + static kf::Vector funcH(const kf::Vector& x, + const kf::Vector& n) + { + kf::Vector y; + + kf::float32_t px{x[0] + n[0]}; + kf::float32_t py{x[1] + n[1]}; + + y[0] = std::sqrt((px * px) + (py * py)); + y[1] = std::atan(py / (px + std::numeric_limits::epsilon())); + return y; + } }; TEST_F(UnscentedKalmanFilterTest, test_UKF_Example01) { - kf::Vector x; - x << 2.0F, 1.0F, 0.0F, 0.0F; - - kf::Matrix P; - P << 0.01F, 0.0F, 0.0F, 0.0F, - 0.0F, 0.01F, 0.0F, 0.0F, - 0.0F, 0.0F, 0.05F, 0.0F, - 0.0F, 0.0F, 0.0F, 0.05F; - - kf::Matrix Q; - Q << 0.05F, 0.0F, 0.0F, 0.0F, - 0.0F, 0.05F, 0.0F, 0.0F, - 0.0F, 0.0F, 0.1F, 0.0F, - 0.0F, 0.0F, 0.0F, 0.1F; - - kf::Matrix R; - R << 0.01F, 0.0F, 0.0F, 0.01F; - - kf::Vector z; - z << 2.5F, 0.05F; - - m_ukf.vecX() = x; - m_ukf.matP() = P; - - m_ukf.setCovarianceQ(Q); - m_ukf.setCovarianceR(R); - - m_ukf.predictUKF(funcF); - - // Expectation from the python results: - // ===================================== - // x = - // [2.0 1.0 0.0 0.0] - // P = - // [[0.11 0.00 0.05 0.00] - // [0.00 0.11 0.00 0.05] - // [0.05 0.00 0.15 0.00] - // [0.00 0.05 0.00 0.15]] - - ASSERT_NEAR(m_ukf.vecX()[0], 2.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.vecX()[1], 1.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.vecX()[2], 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.vecX()[3], 0.0F, FLOAT_EPSILON); - - ASSERT_NEAR(m_ukf.matP()(0, 0), 0.11F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(0, 1), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(0, 2), 0.05F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(0, 3), 0.0F, FLOAT_EPSILON); - - ASSERT_NEAR(m_ukf.matP()(1, 0), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(1, 1), 0.11F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(1, 2), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(1, 3), 0.05F, FLOAT_EPSILON); - - ASSERT_NEAR(m_ukf.matP()(2, 0), 0.05F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(2, 1), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(2, 2), 0.15F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(2, 3), 0.0F, FLOAT_EPSILON); - - ASSERT_NEAR(m_ukf.matP()(3, 0), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(3, 1), 0.05F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(3, 2), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(3, 3), 0.15F, FLOAT_EPSILON); - - m_ukf.correctUKF(funcH, z); - - // Expectations from the python results: - // ====================================== - // x = - // [ 2.554 0.356 0.252 -0.293] - // P = - // [[ 0.01 -0.001 0.005 -0. ] - // [-0.001 0.01 - 0. 0.005 ] - // [ 0.005 - 0. 0.129 - 0. ] - // [-0. 0.005 - 0. 
0.129]] - - ASSERT_NEAR(m_ukf.vecX()[0], 2.554F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.vecX()[1], 0.356F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.vecX()[2], 0.252F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.vecX()[3], -0.293F, FLOAT_EPSILON); - - ASSERT_NEAR(m_ukf.matP()(0, 0), 0.01F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(0, 1), -0.001F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(0, 2), 0.005F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(0, 3), 0.0F, FLOAT_EPSILON); - - ASSERT_NEAR(m_ukf.matP()(1, 0), -0.001F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(1, 1), 0.01F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(1, 2), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(1, 3), 0.005F, FLOAT_EPSILON); - - ASSERT_NEAR(m_ukf.matP()(2, 0), 0.005F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(2, 1), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(2, 2), 0.129F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(2, 3), 0.0F, FLOAT_EPSILON); - - ASSERT_NEAR(m_ukf.matP()(3, 0), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(3, 1), 0.005F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(3, 2), 0.0F, FLOAT_EPSILON); - ASSERT_NEAR(m_ukf.matP()(3, 3), 0.129F, FLOAT_EPSILON); + kf::Vector x; + x << 2.0F, 1.0F, 0.0F, 0.0F; + + kf::Matrix P; + P << 0.01F, 0.0F, 0.0F, 0.0F, 0.0F, 0.01F, 0.0F, 0.0F, 0.0F, 0.0F, 0.05F, + 0.0F, 0.0F, 0.0F, 0.0F, 0.05F; + + kf::Matrix Q; + Q << 0.05F, 0.0F, 0.0F, 0.0F, 0.0F, 0.05F, 0.0F, 0.0F, 0.0F, 0.0F, 0.1F, 0.0F, + 0.0F, 0.0F, 0.0F, 0.1F; + + kf::Matrix R; + R << 0.01F, 0.0F, 0.0F, 0.01F; + + kf::Vector z; + z << 2.5F, 0.05F; + + m_ukf.vecX() = x; + m_ukf.matP() = P; + + m_ukf.setCovarianceQ(Q); + m_ukf.setCovarianceR(R); + + m_ukf.predictUKF(funcF); + + // Expectation from the python results: + // ===================================== + // x = + // [2.0 1.0 0.0 0.0] + // P = + // [[0.11 0.00 0.05 0.00] + // [0.00 0.11 0.00 0.05] + // [0.05 0.00 0.15 0.00] + // [0.00 0.05 0.00 0.15]] + + ASSERT_NEAR(m_ukf.vecX()[0], 2.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.vecX()[1], 1.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.vecX()[2], 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.vecX()[3], 0.0F, FLOAT_EPSILON); + + ASSERT_NEAR(m_ukf.matP()(0, 0), 0.11F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(0, 1), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(0, 2), 0.05F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(0, 3), 0.0F, FLOAT_EPSILON); + + ASSERT_NEAR(m_ukf.matP()(1, 0), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(1, 1), 0.11F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(1, 2), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(1, 3), 0.05F, FLOAT_EPSILON); + + ASSERT_NEAR(m_ukf.matP()(2, 0), 0.05F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(2, 1), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(2, 2), 0.15F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(2, 3), 0.0F, FLOAT_EPSILON); + + ASSERT_NEAR(m_ukf.matP()(3, 0), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(3, 1), 0.05F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(3, 2), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(3, 3), 0.15F, FLOAT_EPSILON); + + m_ukf.correctUKF(funcH, z); + + // Expectations from the python results: + // ====================================== + // x = + // [ 2.554 0.356 0.252 -0.293] + // P = + // [[ 0.01 -0.001 0.005 -0. ] + // [-0.001 0.01 - 0. 0.005 ] + // [ 0.005 - 0. 0.129 - 0. ] + // [-0. 0.005 - 0. 
0.129]] + + ASSERT_NEAR(m_ukf.vecX()[0], 2.554F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.vecX()[1], 0.356F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.vecX()[2], 0.252F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.vecX()[3], -0.293F, FLOAT_EPSILON); + + ASSERT_NEAR(m_ukf.matP()(0, 0), 0.01F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(0, 1), -0.001F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(0, 2), 0.005F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(0, 3), 0.0F, FLOAT_EPSILON); + + ASSERT_NEAR(m_ukf.matP()(1, 0), -0.001F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(1, 1), 0.01F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(1, 2), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(1, 3), 0.005F, FLOAT_EPSILON); + + ASSERT_NEAR(m_ukf.matP()(2, 0), 0.005F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(2, 1), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(2, 2), 0.129F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(2, 3), 0.0F, FLOAT_EPSILON); + + ASSERT_NEAR(m_ukf.matP()(3, 0), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(3, 1), 0.005F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(3, 2), 0.0F, FLOAT_EPSILON); + ASSERT_NEAR(m_ukf.matP()(3, 3), 0.129F, FLOAT_EPSILON); } diff --git a/tests/unit/kalman_filter/unscented_trasform_test.cpp b/tests/unit/kalman_filter/unscented_trasform_test.cpp index ce8d4c5..1719528 100644 --- a/tests/unit/kalman_filter/unscented_trasform_test.cpp +++ b/tests/unit/kalman_filter/unscented_trasform_test.cpp @@ -1,73 +1,73 @@ -#include "gtest/gtest.h" #include "kalman_filter/unscented_transform.h" +#include "gtest/gtest.h" class UnscentedTransformTest : public testing::Test { -public: - virtual void SetUp() override {} - virtual void TearDown() override {} - - static constexpr float ASSERT_FLT_EPSILON{ 0.00001F }; - - static constexpr size_t DIM_1{ 1 }; - static constexpr size_t DIM_2{ 2 }; - - static kf::Vector function1(const kf::Vector& x) - { - kf::Vector y; - y[0] = x[0] * x[0]; - return y; - } - - static kf::Vector function2(const kf::Vector& x) - { - kf::Vector y; - y[0] = x[0] * x[0]; - y[1] = x[1] * x[1]; - return y; - } + public: + virtual void SetUp() override {} + virtual void TearDown() override {} + + static constexpr float ASSERT_FLT_EPSILON{0.00001F}; + + static constexpr size_t DIM_1{1}; + static constexpr size_t DIM_2{2}; + + static kf::Vector function1(const kf::Vector& x) + { + kf::Vector y; + y[0] = x[0] * x[0]; + return y; + } + + static kf::Vector function2(const kf::Vector& x) + { + kf::Vector y; + y[0] = x[0] * x[0]; + y[1] = x[1] * x[1]; + return y; + } }; TEST_F(UnscentedTransformTest, test_unscentedTransform_Example1) { - kf::Vector x; - x << 0.0F; + kf::Vector x; + x << 0.0F; - kf::Matrix P; - P << 0.5F; + kf::Matrix P; + P << 0.5F; - kf::UnscentedTransform UT; - UT.compute(x, P, 0.0F); + kf::UnscentedTransform UT; + UT.compute(x, P, 0.0F); - kf::Vector vecY; - kf::Matrix matPyy; + kf::Vector vecY; + kf::Matrix matPyy; - UT.transform(function1, vecY, matPyy); + UT.transform(function1, vecY, matPyy); - ASSERT_NEAR(vecY[0], 0.5F, ASSERT_FLT_EPSILON); - ASSERT_NEAR(matPyy[0], 0.0F, ASSERT_FLT_EPSILON); + ASSERT_NEAR(vecY[0], 0.5F, ASSERT_FLT_EPSILON); + ASSERT_NEAR(matPyy[0], 0.0F, ASSERT_FLT_EPSILON); } TEST_F(UnscentedTransformTest, test_unscentedTransform_Example2) { - kf::Vector x; - x << 2.0F, 1.0F; + kf::Vector x; + x << 2.0F, 1.0F; - kf::Matrix P; - P << 0.1F, 0.0F, 0.0F, 0.1F; + kf::Matrix P; + P << 0.1F, 0.0F, 0.0F, 0.1F; - kf::UnscentedTransform UT; - UT.compute(x, P, 0.0F); + kf::UnscentedTransform UT; + UT.compute(x, P, 0.0F); - kf::Vector vecY; - kf::Matrix 
matPyy; + kf::Vector vecY; + kf::Matrix matPyy; - UT.transform(function2, vecY, matPyy); + UT.transform(function2, vecY, matPyy); - ASSERT_NEAR(vecY[0], 4.1F, ASSERT_FLT_EPSILON); - ASSERT_NEAR(vecY[1], 1.1F, ASSERT_FLT_EPSILON); - ASSERT_NEAR(matPyy(0, 0), 1.61F, ASSERT_FLT_EPSILON); - ASSERT_NEAR(matPyy(0, 1), -0.01F, ASSERT_FLT_EPSILON); - ASSERT_NEAR(matPyy(1, 0), -0.01F, ASSERT_FLT_EPSILON); - ASSERT_NEAR(matPyy(1, 1), 0.41F, ASSERT_FLT_EPSILON); + ASSERT_NEAR(vecY[0], 4.1F, ASSERT_FLT_EPSILON); + ASSERT_NEAR(vecY[1], 1.1F, ASSERT_FLT_EPSILON); + ASSERT_NEAR(matPyy(0, 0), 1.61F, ASSERT_FLT_EPSILON); + ASSERT_NEAR(matPyy(0, 1), -0.01F, ASSERT_FLT_EPSILON); + ASSERT_NEAR(matPyy(1, 0), -0.01F, ASSERT_FLT_EPSILON); + ASSERT_NEAR(matPyy(1, 1), 0.41F, ASSERT_FLT_EPSILON); } \ No newline at end of file diff --git a/tests/unit/motion_model/ego_motion_model_test.cpp b/tests/unit/motion_model/ego_motion_model_test.cpp index 28e1ec2..eed5c19 100644 --- a/tests/unit/motion_model/ego_motion_model_test.cpp +++ b/tests/unit/motion_model/ego_motion_model_test.cpp @@ -1,263 +1,280 @@ -#include -#include "motion_model/ego_motion_model.h" #include "kalman_filter/kalman_filter.h" +#include "motion_model/ego_motion_model.h" #include "types.h" +#include using namespace kf; class EgoMotionModelTest : public testing::Test { -public: - virtual void SetUp() override {} - virtual void TearDown() override {} + public: + virtual void SetUp() override {} + virtual void TearDown() override {} - static constexpr int32_t DIM_X{ 3 }; - static constexpr int32_t DIM_U{ 2 }; - static constexpr int32_t DIM_Z{ 1 }; + static constexpr int32_t DIM_X{3}; + static constexpr int32_t DIM_U{2}; + static constexpr int32_t DIM_Z{1}; }; TEST_F(EgoMotionModelTest, test_egoMotionModel_Covariances) { - kf::Vector vecX; - vecX << 1.0F, 2.0F, 0.015F; - - kf::Vector vecU; - vecU << 0.5F, 0.005F; - - kf::motionmodel::EgoMotionModel egoMotionModel; - - float32_t const qX{ 0.2F }; - float32_t const qY{ 0.3F }; - float32_t const qTheta{ 0.003F }; - - float32_t const uDeltaDist{ 0.02F }; - float32_t const uDeltaYaw{ 0.0002F }; - - egoMotionModel.setNoiseX(qX); - egoMotionModel.setNoiseY(qY); - egoMotionModel.setNoiseTheta(qTheta); - egoMotionModel.setNoiseDeltaDist(uDeltaDist); - egoMotionModel.setNoiseDeltaYaw(uDeltaYaw); - - kf::Matrix const matQk{ egoMotionModel.getProcessNoiseCov(vecX, vecU) }; - kf::Matrix const matUk{ egoMotionModel.getInputNoiseCov(vecX, vecU) }; - - kf::Matrix const matFk{ egoMotionModel.getJacobianFk(vecX, vecU) }; - kf::Matrix const matBk{ egoMotionModel.getJacobianBk(vecX, vecU) }; - - kf::Matrix matQ; - matQ << qX, 0.0F, 0.0F, - 0.0F, qY, 0.0F, - 0.0F, 0.0F, qTheta; - - kf::Matrix matU; - matU << uDeltaDist, 0.0F, - 0.0F, uDeltaYaw; - - kf::Matrix const matQkExp{ matFk * matQ * matFk.transpose() }; - kf::Matrix const matUkExp{ matBk * matU * matBk.transpose() }; - - EXPECT_NEAR(matQk(0,0), matQkExp(0,0), 0.0001F); - EXPECT_NEAR(matQk(0,1), matQkExp(0,1), 0.0001F); - EXPECT_NEAR(matQk(0,2), matQkExp(0,2), 0.0001F); - EXPECT_NEAR(matQk(1,0), matQkExp(1,0), 0.0001F); - EXPECT_NEAR(matQk(1,1), matQkExp(1,1), 0.0001F); - EXPECT_NEAR(matQk(1,2), matQkExp(1,2), 0.0001F); - EXPECT_NEAR(matQk(2,0), matQkExp(2,0), 0.0001F); - EXPECT_NEAR(matQk(2,1), matQkExp(2,1), 0.0001F); - EXPECT_NEAR(matQk(2,2), matQkExp(2,2), 0.0001F); - - EXPECT_NEAR(matUk(0,0), matUkExp(0,0), 0.0001F); - EXPECT_NEAR(matUk(0,1), matUkExp(0,1), 0.0001F); - EXPECT_NEAR(matUk(0,2), matUkExp(0,2), 0.0001F); - EXPECT_NEAR(matUk(1,0), matUkExp(1,0), 
0.0001F); - EXPECT_NEAR(matUk(1,1), matUkExp(1,1), 0.0001F); - EXPECT_NEAR(matUk(1,2), matUkExp(1,2), 0.0001F); - EXPECT_NEAR(matUk(2,0), matUkExp(2,0), 0.0001F); - EXPECT_NEAR(matUk(2,1), matUkExp(2,1), 0.0001F); - EXPECT_NEAR(matUk(2,2), matUkExp(2,2), 0.0001F); + kf::Vector vecX; + vecX << 1.0F, 2.0F, 0.015F; + + kf::Vector vecU; + vecU << 0.5F, 0.005F; + + kf::motionmodel::EgoMotionModel egoMotionModel; + + float32_t const qX{0.2F}; + float32_t const qY{0.3F}; + float32_t const qTheta{0.003F}; + + float32_t const uDeltaDist{0.02F}; + float32_t const uDeltaYaw{0.0002F}; + + egoMotionModel.setNoiseX(qX); + egoMotionModel.setNoiseY(qY); + egoMotionModel.setNoiseTheta(qTheta); + egoMotionModel.setNoiseDeltaDist(uDeltaDist); + egoMotionModel.setNoiseDeltaYaw(uDeltaYaw); + + kf::Matrix const matQk{ + egoMotionModel.getProcessNoiseCov(vecX, vecU)}; + kf::Matrix const matUk{ + egoMotionModel.getInputNoiseCov(vecX, vecU)}; + + kf::Matrix const matFk{ + egoMotionModel.getJacobianFk(vecX, vecU)}; + kf::Matrix const matBk{ + egoMotionModel.getJacobianBk(vecX, vecU)}; + + kf::Matrix matQ; + matQ << qX, 0.0F, 0.0F, 0.0F, qY, 0.0F, 0.0F, 0.0F, qTheta; + + kf::Matrix matU; + matU << uDeltaDist, 0.0F, 0.0F, uDeltaYaw; + + kf::Matrix const matQkExp{matFk * matQ * matFk.transpose()}; + kf::Matrix const matUkExp{matBk * matU * matBk.transpose()}; + + EXPECT_NEAR(matQk(0, 0), matQkExp(0, 0), 0.0001F); + EXPECT_NEAR(matQk(0, 1), matQkExp(0, 1), 0.0001F); + EXPECT_NEAR(matQk(0, 2), matQkExp(0, 2), 0.0001F); + EXPECT_NEAR(matQk(1, 0), matQkExp(1, 0), 0.0001F); + EXPECT_NEAR(matQk(1, 1), matQkExp(1, 1), 0.0001F); + EXPECT_NEAR(matQk(1, 2), matQkExp(1, 2), 0.0001F); + EXPECT_NEAR(matQk(2, 0), matQkExp(2, 0), 0.0001F); + EXPECT_NEAR(matQk(2, 1), matQkExp(2, 1), 0.0001F); + EXPECT_NEAR(matQk(2, 2), matQkExp(2, 2), 0.0001F); + + EXPECT_NEAR(matUk(0, 0), matUkExp(0, 0), 0.0001F); + EXPECT_NEAR(matUk(0, 1), matUkExp(0, 1), 0.0001F); + EXPECT_NEAR(matUk(0, 2), matUkExp(0, 2), 0.0001F); + EXPECT_NEAR(matUk(1, 0), matUkExp(1, 0), 0.0001F); + EXPECT_NEAR(matUk(1, 1), matUkExp(1, 1), 0.0001F); + EXPECT_NEAR(matUk(1, 2), matUkExp(1, 2), 0.0001F); + EXPECT_NEAR(matUk(2, 0), matUkExp(2, 0), 0.0001F); + EXPECT_NEAR(matUk(2, 1), matUkExp(2, 1), 0.0001F); + EXPECT_NEAR(matUk(2, 2), matUkExp(2, 2), 0.0001F); } TEST_F(EgoMotionModelTest, test_egoMotionModel_ForwardReverseMoves) { - kf::motionmodel::EgoMotionModel egoMotionModel; - kf::Vector vecX; - kf::Vector vecU; - kf::Vector vecXn; + kf::motionmodel::EgoMotionModel egoMotionModel; + kf::Vector vecX; + kf::Vector vecU; + kf::Vector vecXn; - // moving forward - vecX << 0.0F, 0.0F, 0.0F; - vecU << 0.5F, 0.0F; + // moving forward + vecX << 0.0F, 0.0F, 0.0F; + vecU << 0.5F, 0.0F; - vecXn = egoMotionModel.f(vecX, vecU); + vecXn = egoMotionModel.f(vecX, vecU); - EXPECT_NEAR(vecXn[0], vecX[0]+0.5F, 0.0001F); - EXPECT_NEAR(vecXn[1], vecX[1], 0.0001F); - EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); + EXPECT_NEAR(vecXn[0], vecX[0] + 0.5F, 0.0001F); + EXPECT_NEAR(vecXn[1], vecX[1], 0.0001F); + EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); - // moving backward/reverse - vecX << 0.0F, 0.0F, 0.0F; - vecU << -0.5F, 0.0F; + // moving backward/reverse + vecX << 0.0F, 0.0F, 0.0F; + vecU << -0.5F, 0.0F; - vecXn = egoMotionModel.f(vecX, vecU); + vecXn = egoMotionModel.f(vecX, vecU); - EXPECT_NEAR(vecXn[0], vecX[0]-0.5F, 0.0001F); - EXPECT_NEAR(vecXn[1], vecX[1], 0.0001F); - EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); + EXPECT_NEAR(vecXn[0], vecX[0] - 0.5F, 0.0001F); + EXPECT_NEAR(vecXn[1], vecX[1], 
0.0001F); + EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); - // moving forward + oriented 90 degrees - vecX << 0.0F, 0.0F, M_PI / 2.0F; - vecU << 0.5F, 0.0F; + // moving forward + oriented 90 degrees + vecX << 0.0F, 0.0F, M_PI / 2.0F; + vecU << 0.5F, 0.0F; - vecXn = egoMotionModel.f(vecX, vecU); + vecXn = egoMotionModel.f(vecX, vecU); - EXPECT_NEAR(vecXn[0], vecX[0], 0.0001F); - EXPECT_NEAR(vecXn[1], vecX[1] + 0.5F, 0.0001F); - EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); + EXPECT_NEAR(vecXn[0], vecX[0], 0.0001F); + EXPECT_NEAR(vecXn[1], vecX[1] + 0.5F, 0.0001F); + EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); - // moving backward + oriented 90 degrees - vecX << 0.0F, 0.0F, M_PI / 2.0F; - vecU << -0.5F, 0.0F; + // moving backward + oriented 90 degrees + vecX << 0.0F, 0.0F, M_PI / 2.0F; + vecU << -0.5F, 0.0F; - vecXn = egoMotionModel.f(vecX, vecU); + vecXn = egoMotionModel.f(vecX, vecU); - EXPECT_NEAR(vecXn[0], vecX[0], 0.0001F); - EXPECT_NEAR(vecXn[1], vecX[1] - 0.5F, 0.0001F); - EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); + EXPECT_NEAR(vecXn[0], vecX[0], 0.0001F); + EXPECT_NEAR(vecXn[1], vecX[1] - 0.5F, 0.0001F); + EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); - // moving forward + oriented -90 degrees - vecX << 0.0F, 0.0F, -M_PI / 2.0F; - vecU << 0.5F, 0.0F; + // moving forward + oriented -90 degrees + vecX << 0.0F, 0.0F, -M_PI / 2.0F; + vecU << 0.5F, 0.0F; - vecXn = egoMotionModel.f(vecX, vecU); + vecXn = egoMotionModel.f(vecX, vecU); - EXPECT_NEAR(vecXn[0], vecX[0], 0.0001F); - EXPECT_NEAR(vecXn[1], vecX[1] - 0.5F, 0.0001F); - EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); + EXPECT_NEAR(vecXn[0], vecX[0], 0.0001F); + EXPECT_NEAR(vecXn[1], vecX[1] - 0.5F, 0.0001F); + EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); - // moving backward + oriented -90 degrees - vecX << 0.0F, 0.0F, -M_PI / 2.0F; - vecU << -0.5F, 0.0F; + // moving backward + oriented -90 degrees + vecX << 0.0F, 0.0F, -M_PI / 2.0F; + vecU << -0.5F, 0.0F; - vecXn = egoMotionModel.f(vecX, vecU); + vecXn = egoMotionModel.f(vecX, vecU); - EXPECT_NEAR(vecXn[0], vecX[0], 0.0001F); - EXPECT_NEAR(vecXn[1], vecX[1] + 0.5F, 0.0001F); - EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); + EXPECT_NEAR(vecXn[0], vecX[0], 0.0001F); + EXPECT_NEAR(vecXn[1], vecX[1] + 0.5F, 0.0001F); + EXPECT_NEAR(vecXn[2], vecX[2], 0.0001F); - // moving forward + oriented 45 degrees - vecX << 0.0F, 0.0F, M_PI / 4.0F; - vecU << 0.5F, 0.0F; + // moving forward + oriented 45 degrees + vecX << 0.0F, 0.0F, M_PI / 4.0F; + vecU << 0.5F, 0.0F; - vecXn = egoMotionModel.f(vecX, vecU); + vecXn = egoMotionModel.f(vecX, vecU); - EXPECT_NEAR(vecXn[0], vecX[0] + vecU[0] * std::cos(vecX[2] + (vecU[1] / 2.0F)), 0.0001F); - EXPECT_NEAR(vecXn[1], vecX[1] + vecU[0] * std::sin(vecX[2] + (vecU[1] / 2.0F)), 0.0001F); - EXPECT_NEAR(vecXn[2], vecX[2] + vecU[1], 0.0001F); + EXPECT_NEAR(vecXn[0], + vecX[0] + vecU[0] * std::cos(vecX[2] + (vecU[1] / 2.0F)), + 0.0001F); + EXPECT_NEAR(vecXn[1], + vecX[1] + vecU[0] * std::sin(vecX[2] + (vecU[1] / 2.0F)), + 0.0001F); + EXPECT_NEAR(vecXn[2], vecX[2] + vecU[1], 0.0001F); - // moving forward + oriented 45 degrees - vecX << 0.0F, 0.0F, -M_PI / 4.0F; - vecU << 0.5F, 0.0F; + // moving forward + oriented 45 degrees + vecX << 0.0F, 0.0F, -M_PI / 4.0F; + vecU << 0.5F, 0.0F; - vecXn = egoMotionModel.f(vecX, vecU); + vecXn = egoMotionModel.f(vecX, vecU); - EXPECT_NEAR(vecXn[0], vecX[0] + vecU[0] * std::cos(vecX[2] + (vecU[1] / 2.0F)), 0.0001F); - EXPECT_NEAR(vecXn[1], vecX[1] + vecU[0] * std::sin(vecX[2] + (vecU[1] / 2.0F)), 0.0001F); - EXPECT_NEAR(vecXn[2], vecX[2] + vecU[1], 0.0001F); + 
EXPECT_NEAR(vecXn[0], + vecX[0] + vecU[0] * std::cos(vecX[2] + (vecU[1] / 2.0F)), + 0.0001F); + EXPECT_NEAR(vecXn[1], + vecX[1] + vecU[0] * std::sin(vecX[2] + (vecU[1] / 2.0F)), + 0.0001F); + EXPECT_NEAR(vecXn[2], vecX[2] + vecU[1], 0.0001F); } TEST_F(EgoMotionModelTest, test_egoMotionModel_EKF_ForwardReverseMoves) { - kf::KalmanFilter kalmanFilter; - kf::motionmodel::EgoMotionModel egoMotionModel; - kf::Vector vecX; - kf::Vector vecU; - - // moving forward - vecX << 0.0F, 0.0F, 0.0F; - vecU << 0.5F, 0.0F; - - kalmanFilter.vecX() = vecX; - kalmanFilter.predictEkf(egoMotionModel, vecU); - - EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0]+0.5F, 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1], 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); - - // moving backward/reverse - vecX << 0.0F, 0.0F, 0.0F; - vecU << -0.5F, 0.0F; - - kalmanFilter.vecX() = vecX; - kalmanFilter.predictEkf(egoMotionModel, vecU); - - EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0]-0.5F, 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1], 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); - - // moving forward + oriented 90 degrees - vecX << 0.0F, 0.0F, M_PI / 2.0F; - vecU << 0.5F, 0.0F; - - kalmanFilter.vecX() = vecX; - kalmanFilter.predictEkf(egoMotionModel, vecU); - - EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0], 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] + 0.5F, 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); - - // moving backward + oriented 90 degrees - vecX << 0.0F, 0.0F, M_PI / 2.0F; - vecU << -0.5F, 0.0F; - - kalmanFilter.vecX() = vecX; - kalmanFilter.predictEkf(egoMotionModel, vecU); - - EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0], 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] - 0.5F, 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); - - // moving forward + oriented -90 degrees - vecX << 0.0F, 0.0F, -M_PI / 2.0F; - vecU << 0.5F, 0.0F; - - kalmanFilter.vecX() = vecX; - kalmanFilter.predictEkf(egoMotionModel, vecU); - - EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0], 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] - 0.5F, 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); - - // moving backward + oriented -90 degrees - vecX << 0.0F, 0.0F, -M_PI / 2.0F; - vecU << -0.5F, 0.0F; - - kalmanFilter.vecX() = vecX; - kalmanFilter.predictEkf(egoMotionModel, vecU); - - EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0], 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] + 0.5F, 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); - - // moving forward + oriented 45 degrees - vecX << 0.0F, 0.0F, M_PI / 4.0F; - vecU << 0.5F, 0.0F; - - kalmanFilter.vecX() = vecX; - kalmanFilter.predictEkf(egoMotionModel, vecU); - - EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0] + vecU[0] * std::cos(vecX[2] + (vecU[1] / 2.0F)), 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] + vecU[0] * std::sin(vecX[2] + (vecU[1] / 2.0F)), 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2] + vecU[1], 0.0001F); - - // moving forward + oriented 45 degrees - vecX << 0.0F, 0.0F, -M_PI / 4.0F; - vecU << 0.5F, 0.0F; - - kalmanFilter.vecX() = vecX; - kalmanFilter.predictEkf(egoMotionModel, vecU); - - EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0] + vecU[0] * std::cos(vecX[2] + (vecU[1] / 2.0F)), 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] + vecU[0] * std::sin(vecX[2] + (vecU[1] / 2.0F)), 0.0001F); - EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2] + vecU[1], 0.0001F); + kf::KalmanFilter kalmanFilter; + 
kf::motionmodel::EgoMotionModel egoMotionModel; + kf::Vector vecX; + kf::Vector vecU; + + // moving forward + vecX << 0.0F, 0.0F, 0.0F; + vecU << 0.5F, 0.0F; + + kalmanFilter.vecX() = vecX; + kalmanFilter.predictEkf(egoMotionModel, vecU); + + EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0] + 0.5F, 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1], 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); + + // moving backward/reverse + vecX << 0.0F, 0.0F, 0.0F; + vecU << -0.5F, 0.0F; + + kalmanFilter.vecX() = vecX; + kalmanFilter.predictEkf(egoMotionModel, vecU); + + EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0] - 0.5F, 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1], 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); + + // moving forward + oriented 90 degrees + vecX << 0.0F, 0.0F, M_PI / 2.0F; + vecU << 0.5F, 0.0F; + + kalmanFilter.vecX() = vecX; + kalmanFilter.predictEkf(egoMotionModel, vecU); + + EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0], 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] + 0.5F, 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); + + // moving backward + oriented 90 degrees + vecX << 0.0F, 0.0F, M_PI / 2.0F; + vecU << -0.5F, 0.0F; + + kalmanFilter.vecX() = vecX; + kalmanFilter.predictEkf(egoMotionModel, vecU); + + EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0], 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] - 0.5F, 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); + + // moving forward + oriented -90 degrees + vecX << 0.0F, 0.0F, -M_PI / 2.0F; + vecU << 0.5F, 0.0F; + + kalmanFilter.vecX() = vecX; + kalmanFilter.predictEkf(egoMotionModel, vecU); + + EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0], 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] - 0.5F, 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); + + // moving backward + oriented -90 degrees + vecX << 0.0F, 0.0F, -M_PI / 2.0F; + vecU << -0.5F, 0.0F; + + kalmanFilter.vecX() = vecX; + kalmanFilter.predictEkf(egoMotionModel, vecU); + + EXPECT_NEAR(kalmanFilter.vecX()[0], vecX[0], 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[1], vecX[1] + 0.5F, 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2], 0.0001F); + + // moving forward + oriented 45 degrees + vecX << 0.0F, 0.0F, M_PI / 4.0F; + vecU << 0.5F, 0.0F; + + kalmanFilter.vecX() = vecX; + kalmanFilter.predictEkf(egoMotionModel, vecU); + + EXPECT_NEAR(kalmanFilter.vecX()[0], + vecX[0] + vecU[0] * std::cos(vecX[2] + (vecU[1] / 2.0F)), + 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[1], + vecX[1] + vecU[0] * std::sin(vecX[2] + (vecU[1] / 2.0F)), + 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2] + vecU[1], 0.0001F); + + // moving forward + oriented 45 degrees + vecX << 0.0F, 0.0F, -M_PI / 4.0F; + vecU << 0.5F, 0.0F; + + kalmanFilter.vecX() = vecX; + kalmanFilter.predictEkf(egoMotionModel, vecU); + + EXPECT_NEAR(kalmanFilter.vecX()[0], + vecX[0] + vecU[0] * std::cos(vecX[2] + (vecU[1] / 2.0F)), + 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[1], + vecX[1] + vecU[0] * std::sin(vecX[2] + (vecU[1] / 2.0F)), + 0.0001F); + EXPECT_NEAR(kalmanFilter.vecX()[2], vecX[2] + vecU[1], 0.0001F); } diff --git a/tests/unit/unit_tests.cpp b/tests/unit/unit_tests.cpp index 3eb216a..86dac65 100644 --- a/tests/unit/unit_tests.cpp +++ b/tests/unit/unit_tests.cpp @@ -1,7 +1,7 @@ #include -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } \ 
No newline at end of file
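
Below is a minimal usage sketch (not part of the patch) illustrating the reformatted EgoMotionModel together with the EKF prediction step, written in the 2-space, 80-column style this PR enforces. It assumes only the API exercised in tests/unit/motion_model/ego_motion_model_test.cpp (the setNoise* setters and kf::KalmanFilter::predictEkf with an external-input motion model); the initial covariance values and the DIM_Z constant are illustrative placeholders, not values taken from the diff.

#include <cmath>
#include <cstdint>

#include "kalman_filter/kalman_filter.h"
#include "motion_model/ego_motion_model.h"
#include "types.h"

int main()
{
  // Measurement dimension is only needed to instantiate the filter; this
  // sketch performs prediction (dead reckoning) only.
  static constexpr int32_t DIM_Z{1};

  kf::KalmanFilter<kf::motionmodel::DIM_X, DIM_Z> ekf;
  kf::motionmodel::EgoMotionModel egoMotionModel;

  // Noise variances as used in test_egoMotionModel_Covariances.
  egoMotionModel.setNoiseX(0.2F);
  egoMotionModel.setNoiseY(0.3F);
  egoMotionModel.setNoiseTheta(0.003F);
  egoMotionModel.setNoiseDeltaDist(0.02F);
  egoMotionModel.setNoiseDeltaYaw(0.0002F);

  // Start at the origin, heading 45 degrees; P0 is an illustrative guess.
  ekf.vecX() << 0.0F, 0.0F, M_PI / 4.0F;
  ekf.matP() << 0.1F, 0.0F, 0.0F,
                0.0F, 0.1F, 0.0F,
                0.0F, 0.0F, 0.01F;

  // One odometry increment: 0.5 m travelled, no change in heading.
  kf::Vector<kf::motionmodel::DIM_U> vecU;
  vecU << 0.5F, 0.0F;

  // EKF prediction with the external-input motion model: the pose advances
  // by roughly 0.354 m along both x and y, and P grows by Qk + Uk.
  ekf.predictEkf(egoMotionModel, vecU);

  return 0;
}

The same dead-reckoning step can also be evaluated on the motion model directly via egoMotionModel.f(vecX, vecU), which is how the forward/reverse-move tests above check the predicted pose.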