diff --git a/gtsam/gtsam.i b/gtsam/gtsam.i
index b9ecf6f3b4..79d489128f 100644
--- a/gtsam/gtsam.i
+++ b/gtsam/gtsam.i
@@ -2925,6 +2925,7 @@ class ShonanAveragingParameters2 {
   void setOptimalityThreshold(double value);
   double getOptimalityThreshold() const;
   void setAnchor(size_t index, const gtsam::Rot2& value);
+  pair<size_t, gtsam::Rot2> getAnchor();
   void setAnchorWeight(double value);
   double getAnchorWeight() const;
   void setKarcherWeight(double value);
@@ -2940,6 +2941,7 @@ class ShonanAveragingParameters3 {
   void setOptimalityThreshold(double value);
   double getOptimalityThreshold() const;
   void setAnchor(size_t index, const gtsam::Rot3& value);
+  pair<size_t, gtsam::Rot3> getAnchor();
   void setAnchorWeight(double value);
   double getAnchorWeight() const;
   void setKarcherWeight(double value);
diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp
index 2485418cfa..bec48f30d3 100644
--- a/gtsam/sfm/ShonanAveraging.cpp
+++ b/gtsam/sfm/ShonanAveraging.cpp
@@ -16,26 +16,25 @@
  * @brief Shonan Averaging algorithm
  */
 
-#include
-
+#include
 #include
 #include
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
-#include
 #include
-#include
-
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 
 namespace gtsam {
@@ -50,8 +49,11 @@ template <size_t d>
 ShonanAveragingParameters<d>::ShonanAveragingParameters(
     const LevenbergMarquardtParams &_lm, const std::string &method,
     double optimalityThreshold, double alpha, double beta, double gamma)
-    : lm(_lm), optimalityThreshold(optimalityThreshold), alpha(alpha),
-      beta(beta), gamma(gamma) {
+    : lm(_lm),
+      optimalityThreshold(optimalityThreshold),
+      alpha(alpha),
+      beta(beta),
+      gamma(gamma) {
   // By default, we will do conjugate gradient
   lm.linearSolverType = LevenbergMarquardtParams::Iterative;
 
@@ -92,29 +94,40 @@ template struct ShonanAveragingParameters<3>;
 
 /* ************************************************************************* */
 // Calculate number of unknown rotations referenced by measurements
+// Throws exception of keys are not numbered as expected (may change in future).
 template <size_t d>
-static size_t
-NrUnknowns(const typename ShonanAveraging<d>::Measurements &measurements) {
+static size_t NrUnknowns(
+    const typename ShonanAveraging<d>::Measurements &measurements) {
+  Key maxKey = 0;
   std::set<Key> keys;
   for (const auto &measurement : measurements) {
-    keys.insert(measurement.key1());
-    keys.insert(measurement.key2());
+    for (const Key &key : measurement.keys()) {
+      maxKey = std::max(key, maxKey);
+      keys.insert(key);
+    }
   }
-  return keys.size();
+  size_t N = keys.size();
+  if (maxKey != N - 1) {
+    throw std::invalid_argument(
+        "As of now, ShonanAveraging expects keys numbered 0..N-1");
+  }
+  return N;
 }
 
 /* ************************************************************************* */
 template <size_t d>
 ShonanAveraging<d>::ShonanAveraging(const Measurements &measurements,
                                     const Parameters &parameters)
-    : parameters_(parameters), measurements_(measurements),
+    : parameters_(parameters),
+      measurements_(measurements),
       nrUnknowns_(NrUnknowns<d>(measurements)) {
   for (const auto &measurement : measurements_) {
     const auto &model = measurement.noiseModel();
     if (model && model->dim() != SO<d>::dimension) {
       measurement.print("Factor with incorrect noise model:\n");
-      throw std::invalid_argument("ShonanAveraging: measurements passed to "
-                                  "constructor have incorrect dimension.");
+      throw std::invalid_argument(
+          "ShonanAveraging: measurements passed to "
+          "constructor have incorrect dimension.");
     }
   }
   Q_ = buildQ();
@@ -196,7 +209,7 @@ Matrix ShonanAveraging<d>::StiefelElementMatrix(const Values &values) {
   Matrix S(p, N * d);
   for (const auto it : values.filter<SOn>()) {
     S.middleCols<d>(it.key * d) =
-        it.value.matrix().leftCols<d>(); // project Qj to Stiefel
+        it.value.matrix().leftCols<d>();  // project Qj to Stiefel
   }
   return S;
 }
@@ -227,7 +240,8 @@ Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const {
 }
 
 /* ************************************************************************* */
-template <size_t d> static Matrix RoundSolutionS(const Matrix &S) {
+template <size_t d>
+static Matrix RoundSolutionS(const Matrix &S) {
   const size_t N = S.cols() / d;
   // First, compute a thin SVD of S
   Eigen::JacobiSVD<Matrix> svd(S, Eigen::ComputeThinV);
@@ -246,8 +260,7 @@ template <size_t d> static Matrix RoundSolutionS(const Matrix &S) {
   for (size_t i = 0; i < N; ++i) {
     // Compute the determinant of the ith dxd block of R
     double determinant = R.middleCols<d>(d * i).determinant();
-    if (determinant > 0)
-      ++numPositiveBlocks;
+    if (determinant > 0) ++numPositiveBlocks;
   }
 
   if (numPositiveBlocks < N / 2) {
@@ -263,7 +276,8 @@ template <size_t d> static Matrix RoundSolutionS(const Matrix &S) {
 }
 
 /* ************************************************************************* */
-template <> Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const {
+template <>
+Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const {
   // Round to a 2*2N matrix
   Matrix R = RoundSolutionS<2>(S);
@@ -276,7 +290,8 @@ template <> Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const {
   return values;
 }
 
-template <> Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const {
+template <>
+Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const {
   // Round to a 3*3N matrix
   Matrix R = RoundSolutionS<3>(S);
@@ -332,7 +347,8 @@ static double Kappa(const BinaryMeasurement<T> &measurement) {
 }
 
 /* ************************************************************************* */
-template <size_t d> Sparse ShonanAveraging<d>::buildD() const {
+template <size_t d>
+Sparse ShonanAveraging<d>::buildD() const {
   // Each measurement contributes 2*d elements along the diagonal of the
   // degree matrix.
   static constexpr size_t stride = 2 * d;
@@ -366,7 +382,8 @@ template <size_t d> Sparse ShonanAveraging<d>::buildD() const {
 }
 
 /* ************************************************************************* */
-template <size_t d> Sparse ShonanAveraging<d>::buildQ() const {
+template <size_t d>
+Sparse ShonanAveraging<d>::buildQ() const {
   // Each measurement contributes 2*d^2 elements on a pair of symmetric
   // off-diagonal blocks
   static constexpr size_t stride = 2 * d * d;
@@ -513,12 +530,12 @@ struct MatrixProdFunctor {
 // - We've been using 10^-4 for the nonnegativity tolerance
 // - for numLanczosVectors, 20 is a good default value
-static bool
-SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue,
-                        Vector *minEigenVector = 0, size_t *numIterations = 0,
-                        size_t maxIterations = 1000,
-                        double minEigenvalueNonnegativityTolerance = 10e-4,
-                        Eigen::Index numLanczosVectors = 20) {
+static bool SparseMinimumEigenValue(
+    const Sparse &A, const Matrix &S, double *minEigenValue,
+    Vector *minEigenVector = 0, size_t *numIterations = 0,
+    size_t maxIterations = 1000,
+    double minEigenvalueNonnegativityTolerance = 10e-4,
+    Eigen::Index numLanczosVectors = 20) {
   // a. Estimate the largest-magnitude eigenvalue of this matrix using Lanczos
   MatrixProdFunctor lmOperator(A);
   Spectra::SymEigsSolver
@@ ... @@
-      minEigenVector->normalize(); // Ensure that this is a unit vector
+      minEigenVector->normalize();  // Ensure that this is a unit vector
     }
     return true;
   }
@@ -578,7 +594,7 @@ SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue,
   Vector perturbation(v0.size());
   perturbation.setRandom();
   perturbation.normalize();
-  Vector xinit = v0 + (.03 * v0.norm()) * perturbation; // Perturb v0 by ~3%
+  Vector xinit = v0 + (.03 * v0.norm()) * perturbation;  // Perturb v0 by ~3%
 
   // Use this to initialize the eigensolver
   minEigenValueSolver.init(xinit.data());
@@ -590,21 +606,20 @@ SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue,
       maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue,
       Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);
 
-  if (minConverged != 1)
-    return false;
+  if (minConverged != 1) return false;
 
   *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue;
   if (minEigenVector) {
     *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0);
-    minEigenVector->normalize(); // Ensure that this is a unit vector
+    minEigenVector->normalize();  // Ensure that this is a unit vector
   }
-  if (numIterations)
-    *numIterations = minEigenValueSolver.num_iterations();
+  if (numIterations) *numIterations = minEigenValueSolver.num_iterations();
 
   return true;
 }
 
 /* ************************************************************************* */
-template <size_t d> Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
+template <size_t d>
+Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
   auto Lambda = computeLambda(S);
   return Lambda - Q_;
 }
@@ -628,8 +643,8 @@ double ShonanAveraging<d>::computeMinEigenValue(const Values &values,
 
 /* ************************************************************************* */
 template <size_t d>
-std::pair<double, Vector>
-ShonanAveraging<d>::computeMinEigenVector(const Values &values) const {
+std::pair<double, Vector> ShonanAveraging<d>::computeMinEigenVector(
+    const Values &values) const {
   Vector minEigenVector;
   double minEigenValue = computeMinEigenValue(values, &minEigenVector);
   return std::make_pair(minEigenValue, minEigenVector);
@@ -750,7 +765,8 @@ Values ShonanAveraging<d>::initializeRandomly(std::mt19937 &rng) const {
 }
 
 /* ************************************************************************* */
-template <size_t d> Values ShonanAveraging<d>::initializeRandomly() const {
+template <size_t d>
+Values ShonanAveraging<d>::initializeRandomly() const {
   return initializeRandomly(kRandomNumberGenerator);
 }
 
@@ -759,7 +775,7 @@ template <size_t d>
 Values ShonanAveraging<d>::initializeRandomlyAt(size_t p,
                                                 std::mt19937 &rng) const {
   const Values randomRotations = initializeRandomly(rng);
-  return LiftTo<Rot>(p, randomRotations); // lift to p!
+  return LiftTo<Rot>(p, randomRotations);  // lift to p!
 }
 
 /* ************************************************************************* */
@@ -823,8 +839,8 @@ ShonanAveraging3::ShonanAveraging3(string g2oFile, const Parameters &parameters)
 
 // Extract Rot3 measurement from Pose3 betweenfactors
 // Modeled after similar function in dataset.cpp
-static BinaryMeasurement<Rot3>
-convert(const BetweenFactor<Pose3>::shared_ptr &f) {
+static BinaryMeasurement<Rot3> convert(
+    const BetweenFactor<Pose3>::shared_ptr &f) {
   auto gaussian =
       boost::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
   if (!gaussian)
@@ -837,12 +853,11 @@ convert(const BetweenFactor<Pose3>::shared_ptr &f) {
       noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3), true));
 }
 
-static ShonanAveraging3::Measurements
-extractRot3Measurements(const BetweenFactorPose3s &factors) {
+static ShonanAveraging3::Measurements extractRot3Measurements(
+    const BetweenFactorPose3s &factors) {
   ShonanAveraging3::Measurements result;
   result.reserve(factors.size());
-  for (auto f : factors)
-    result.push_back(convert(f));
+  for (auto f : factors) result.push_back(convert(f));
   return result;
 }
 
@@ -851,4 +866,4 @@ ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors,
     : ShonanAveraging<3>(extractRot3Measurements(factors), parameters) {}
 
 /* ************************************************************************* */
-} // namespace gtsam
+}  // namespace gtsam
diff --git a/gtsam/sfm/ShonanAveraging.h b/gtsam/sfm/ShonanAveraging.h
index ed94329a27..97f92d1021 100644
--- a/gtsam/sfm/ShonanAveraging.h
+++ b/gtsam/sfm/ShonanAveraging.h
@@ -20,36 +20,38 @@
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
+#include
 
 namespace gtsam {
 
 class NonlinearFactorGraph;
 class LevenbergMarquardtOptimizer;
 
 /// Parameters governing optimization etc.
-template <size_t d> struct GTSAM_EXPORT ShonanAveragingParameters {
+template <size_t d>
+struct GTSAM_EXPORT ShonanAveragingParameters {
   // Select Rot2 or Rot3 interface based template parameter d
   using Rot = typename std::conditional<d == 2, Rot2, Rot3>::type;
   using Anchor = std::pair<size_t, Rot>;
 
   // Paremeters themselves:
-  LevenbergMarquardtParams lm; // LM parameters
-  double optimalityThreshold;  // threshold used in checkOptimality
-  Anchor anchor;               // pose to use as anchor if not Karcher
-  double alpha;                // weight of anchor-based prior (default 0)
-  double beta;                 // weight of Karcher-based prior (default 1)
-  double gamma;                // weight of gauge-fixing factors (default 0)
+  LevenbergMarquardtParams lm;  // LM parameters
+  double optimalityThreshold;   // threshold used in checkOptimality
+  Anchor anchor;                // pose to use as anchor if not Karcher
+  double alpha;                 // weight of anchor-based prior (default 0)
+  double beta;                  // weight of Karcher-based prior (default 1)
+  double gamma;                 // weight of gauge-fixing factors (default 0)
 
   ShonanAveragingParameters(const LevenbergMarquardtParams &lm =
                                 LevenbergMarquardtParams::CeresDefaults(),
@@ -64,6 +66,7 @@ template <size_t d> struct GTSAM_EXPORT ShonanAveragingParameters {
   double getOptimalityThreshold() const { return optimalityThreshold; }
 
   void setAnchor(size_t index, const Rot &value) { anchor = {index, value}; }
+  std::pair<size_t, Rot> getAnchor() { return anchor; }
 
   void setAnchorWeight(double value) { alpha = value; }
   double getAnchorWeight() { return alpha; }
@@ -93,8 +96,9 @@ using ShonanAveragingParameters3 = ShonanAveragingParameters<3>;
  * European Computer Vision Conference, 2020.
  * You can view our ECCV spotlight video at https://youtu.be/5ppaqMyHtE0
  */
-template <size_t d> class GTSAM_EXPORT ShonanAveraging {
-public:
+template <size_t d>
+class GTSAM_EXPORT ShonanAveraging {
+ public:
   using Sparse = Eigen::SparseMatrix<double>;
 
   // Define the Parameters type and use its typedef of the rotation type:
@@ -105,13 +109,13 @@ template <size_t d> class GTSAM_EXPORT ShonanAveraging {
   // TODO(frank): use BinaryMeasurement?
   using Measurements = std::vector<BinaryMeasurement<Rot>>;
 
-private:
+ private:
   Parameters parameters_;
   Measurements measurements_;
   size_t nrUnknowns_;
-  Sparse D_; // Sparse (diagonal) degree matrix
-  Sparse Q_; // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr
-  Sparse L_; // connection Laplacian L = D - Q, needed for optimality check
+  Sparse D_;  // Sparse (diagonal) degree matrix
+  Sparse Q_;  // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr
+  Sparse L_;  // connection Laplacian L = D - Q, needed for optimality check
 
   /**
    * Build 3Nx3N sparse matrix consisting of rotation measurements, arranged as
@@ -122,7 +126,7 @@ template <size_t d> class GTSAM_EXPORT ShonanAveraging {
   /// Build 3Nx3N sparse degree matrix D
   Sparse buildD() const;
 
-public:
+ public:
   /// @name Standard Constructors
   /// @{
 
@@ -156,12 +160,12 @@ template <size_t d> class GTSAM_EXPORT ShonanAveraging {
   /// @name Matrix API (advanced use, debugging)
   /// @{
 
-  Sparse D() const { return D_; }              ///< Sparse version of D
-  Matrix denseD() const { return Matrix(D_); } ///< Dense version of D
-  Sparse Q() const { return Q_; }              ///< Sparse version of Q
-  Matrix denseQ() const { return Matrix(Q_); } ///< Dense version of Q
-  Sparse L() const { return L_; }              ///< Sparse version of L
-  Matrix denseL() const { return Matrix(L_); } ///< Dense version of L
+  Sparse D() const { return D_; }               ///< Sparse version of D
+  Matrix denseD() const { return Matrix(D_); }  ///< Dense version of D
+  Sparse Q() const { return Q_; }               ///< Sparse version of Q
+  Matrix denseQ() const { return Matrix(Q_); }  ///< Dense version of Q
+  Sparse L() const { return L_; }               ///< Sparse version of L
+  Matrix denseL() const { return Matrix(L_); }  ///< Dense version of L
 
   /// Version that takes pxdN Stiefel manifold elements
   Sparse computeLambda(const Matrix &S) const;
@@ -220,11 +224,10 @@ template <size_t d> class GTSAM_EXPORT ShonanAveraging {
    * @param minEigenVector corresponding to minEigenValue at level p-1
    * @return values of type SO(p)
    */
-  Values
-  initializeWithDescent(size_t p, const Values &values,
-                        const Vector &minEigenVector, double minEigenValue,
-                        double gradienTolerance = 1e-2,
-                        double preconditionedGradNormTolerance = 1e-4) const;
+  Values initializeWithDescent(
+      size_t p, const Values &values, const Vector &minEigenVector,
+      double minEigenValue, double gradienTolerance = 1e-2,
+      double preconditionedGradNormTolerance = 1e-4) const;
   /// @}
   /// @name Advanced API
   /// @{
@@ -237,11 +240,11 @@ template <size_t d> class GTSAM_EXPORT ShonanAveraging {
 
   /**
    * Create initial Values of type SO(p)
-   * @param p the dimensionality of the rotation manifold
+   * @param p the dimensionality of the rotation manifold
    */
   Values initializeRandomlyAt(size_t p, std::mt19937 &rng) const;
 
-  /// Version of initializeRandomlyAt with fixed random seed.
+  /// Version of initializeRandomlyAt with fixed random seed.
   Values initializeRandomlyAt(size_t p) const;
 
   /**
@@ -300,7 +303,8 @@ template <size_t d> class GTSAM_EXPORT ShonanAveraging {
   Values roundSolution(const Values &values) const;
 
   /// Lift Values of type T to SO(p)
-  template <class T> static Values LiftTo(size_t p, const Values &values) {
+  template <class T>
+  static Values LiftTo(size_t p, const Values &values) {
     Values result;
     for (const auto it : values.filter<T>()) {
       result.insert(it.key, SOn::Lift(p, it.value.matrix()));
     }
@@ -327,7 +331,7 @@ template <size_t d> class GTSAM_EXPORT ShonanAveraging {
    */
   Values initializeRandomly(std::mt19937 &rng) const;
 
-  /// Random initialization for wrapper, fixed random seed.
+  /// Random initialization for wrapper, fixed random seed.
   Values initializeRandomly() const;
 
   /**
@@ -346,20 +350,22 @@ template <size_t d> class GTSAM_EXPORT ShonanAveraging {
 // convenience interface with file access.
 class ShonanAveraging2 : public ShonanAveraging<2> {
-public:
+ public:
   ShonanAveraging2(const Measurements &measurements,
                    const Parameters &parameters = Parameters());
-  ShonanAveraging2(string g2oFile, const Parameters &parameters = Parameters());
+  explicit ShonanAveraging2(string g2oFile,
+                            const Parameters &parameters = Parameters());
 };
 
 class ShonanAveraging3 : public ShonanAveraging<3> {
-public:
+ public:
   ShonanAveraging3(const Measurements &measurements,
                    const Parameters &parameters = Parameters());
-  ShonanAveraging3(string g2oFile, const Parameters &parameters = Parameters());
+  explicit ShonanAveraging3(string g2oFile,
+                            const Parameters &parameters = Parameters());
 
   // TODO(frank): Deprecate after we land pybind wrapper
   ShonanAveraging3(const BetweenFactorPose3s &factors,
                    const Parameters &parameters = Parameters());
 };
 
-} // namespace gtsam
+}  // namespace gtsam
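
For reference, a minimal C++ usage sketch of the API surface touched by this patch: the new getAnchor() accessor next to setAnchor(), the now-explicit g2o-file constructor, and initializeRandomly(). The file name "pose3example.g2o" and the run() call are illustrative assumptions and are not part of the diff above.

// Sketch only -- exercises the parameters API shown in the diff.
// Assumptions (not in the diff): the g2o file name and the run() entry point.
#include <gtsam/geometry/Rot3.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/sfm/ShonanAveraging.h>

#include <iostream>
#include <tuple>

int main() {
  gtsam::ShonanAveragingParameters3 parameters;
  parameters.setAnchor(0, gtsam::Rot3());      // anchor rotation 0 at identity
  const auto anchor = parameters.getAnchor();  // new accessor: {index, Rot3}
  std::cout << "anchor index: " << anchor.first << std::endl;

  // Keys in the input must be numbered 0..N-1; NrUnknowns now throws otherwise.
  gtsam::ShonanAveraging3 shonan("pose3example.g2o", parameters);
  gtsam::Values initial = shonan.initializeRandomly();

  gtsam::Values solution;
  double finalCost;
  std::tie(solution, finalCost) = shonan.run(initial);  // assumed existing API
  std::cout << "final cost: " << finalCost << std::endl;
  return 0;
}

Note that, with the NrUnknowns change above, constructing a ShonanAveraging object from measurements whose keys are not numbered 0..N-1 now throws std::invalid_argument.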